2024-12-09 01:51:16,981 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-09 01:51:16,993 main DEBUG Took 0.010109 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-09 01:51:16,994 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-09 01:51:16,994 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-09 01:51:16,995 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-09 01:51:16,996 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 01:51:17,015 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-09 01:51:17,030 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,032 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 01:51:17,032 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,033 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 01:51:17,033 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,034 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 01:51:17,035 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,035 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 01:51:17,036 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,037 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 01:51:17,038 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,038 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 01:51:17,039 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,039 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 01:51:17,040 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,040 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 01:51:17,041 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,041 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 01:51:17,042 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,042 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 01:51:17,043 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,043 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 01:51:17,044 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,044 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 01:51:17,045 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,045 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-09 01:51:17,047 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 01:51:17,049 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-09 01:51:17,051 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-09 01:51:17,051 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-09 01:51:17,053 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-09 01:51:17,053 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-09 01:51:17,063 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-09 01:51:17,066 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-09 01:51:17,067 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-09 01:51:17,068 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-09 01:51:17,068 main DEBUG createAppenders(={Console})
2024-12-09 01:51:17,069 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized
2024-12-09 01:51:17,069 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-09 01:51:17,070 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK.
2024-12-09 01:51:17,071 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-09 01:51:17,071 main DEBUG OutputStream closed
2024-12-09 01:51:17,071 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-09 01:51:17,072 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-09 01:51:17,072 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK
2024-12-09 01:51:17,153 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-09 01:51:17,155 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-09 01:51:17,156 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-09 01:51:17,157 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-09 01:51:17,158 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-09 01:51:17,158 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-09 01:51:17,159 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-09 01:51:17,159 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-09 01:51:17,160 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-09 01:51:17,160 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-09 01:51:17,161 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-09 01:51:17,161 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-09 01:51:17,162 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-09 01:51:17,162 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-09 01:51:17,162 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-09 01:51:17,163 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-09 01:51:17,163 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-09 01:51:17,164 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-09 01:51:17,166 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-09 01:51:17,166 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-12-09 01:51:17,167 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-09 01:51:17,167 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-12-09T01:51:17,184 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-12-09 01:51:17,187 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-09 01:51:17,188 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-09T01:51:17,446 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80 2024-12-09T01:51:17,471 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863, deleteOnExit=true 2024-12-09T01:51:17,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/test.cache.data in system properties and HBase conf 2024-12-09T01:51:17,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T01:51:17,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/hadoop.log.dir in system properties and HBase conf 2024-12-09T01:51:17,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T01:51:17,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T01:51:17,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T01:51:17,564 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-09T01:51:17,658 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T01:51:17,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:51:17,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:51:17,663 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T01:51:17,663 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:51:17,664 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T01:51:17,664 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T01:51:17,665 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:51:17,665 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:51:17,666 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T01:51:17,666 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/nfs.dump.dir in system properties and HBase conf 2024-12-09T01:51:17,666 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/java.io.tmpdir in system properties and HBase conf 2024-12-09T01:51:17,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:51:17,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T01:51:17,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T01:51:18,497 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T01:51:18,574 INFO [Time-limited test {}] log.Log(170): Logging initialized @2319ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T01:51:18,652 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:18,717 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:18,738 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:18,738 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:18,740 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:51:18,754 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:18,757 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4395d44b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:18,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f93dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:18,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58dbf239{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/java.io.tmpdir/jetty-localhost-40543-hadoop-hdfs-3_4_1-tests_jar-_-any-14099613635106576432/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:51:18,975 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13e2962d{HTTP/1.1, (http/1.1)}{localhost:40543} 2024-12-09T01:51:18,976 INFO [Time-limited test {}] server.Server(415): Started @2722ms 2024-12-09T01:51:19,405 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:19,413 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:19,415 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:19,416 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:19,416 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:51:19,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@431e53b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:19,421 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dc262e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:19,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65462677{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/java.io.tmpdir/jetty-localhost-33721-hadoop-hdfs-3_4_1-tests_jar-_-any-15175596315388970982/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:19,550 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@383014b{HTTP/1.1, (http/1.1)}{localhost:33721} 2024-12-09T01:51:19,550 INFO [Time-limited test {}] server.Server(415): Started @3297ms 2024-12-09T01:51:19,606 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:51:19,735 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:19,740 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:19,748 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:19,748 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:19,749 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:51:19,751 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@444b27d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:19,752 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6af5a446{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:19,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@513cab2c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/java.io.tmpdir/jetty-localhost-32829-hadoop-hdfs-3_4_1-tests_jar-_-any-2293374693196928668/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:19,894 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29a123ec{HTTP/1.1, (http/1.1)}{localhost:32829} 2024-12-09T01:51:19,894 INFO [Time-limited test {}] server.Server(415): Started @3640ms 2024-12-09T01:51:19,896 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:51:19,948 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:19,955 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:19,963 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:19,963 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:19,964 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:51:19,965 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35e2f174{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:19,966 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@343b36c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:20,059 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/data/data3/current/BP-559585578-172.17.0.2-1733709078255/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:20,059 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/data/data4/current/BP-559585578-172.17.0.2-1733709078255/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:20,059 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/data/data2/current/BP-559585578-172.17.0.2-1733709078255/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:20,059 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/data/data1/current/BP-559585578-172.17.0.2-1733709078255/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:20,108 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T01:51:20,109 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:51:20,121 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@653e6301{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/java.io.tmpdir/jetty-localhost-46471-hadoop-hdfs-3_4_1-tests_jar-_-any-14184699641559876066/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:20,122 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@404caff2{HTTP/1.1, (http/1.1)}{localhost:46471} 2024-12-09T01:51:20,122 INFO [Time-limited test {}] server.Server(415): Started @3869ms 2024-12-09T01:51:20,126 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:51:20,185 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d14185ead5a5601 with lease ID 0x6e4e85723dc6a5cd: Processing first storage report for DS-26a06a3a-54a9-41d5-830b-1d5e13bbbc7f from datanode DatanodeRegistration(127.0.0.1:35415, datanodeUuid=af8a4047-b43f-4e4a-a3ed-218f5a14e69e, infoPort=35427, infoSecurePort=0, ipcPort=39837, storageInfo=lv=-57;cid=testClusterID;nsid=348972446;c=1733709078255) 2024-12-09T01:51:20,187 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d14185ead5a5601 with lease ID 0x6e4e85723dc6a5cd: from storage DS-26a06a3a-54a9-41d5-830b-1d5e13bbbc7f node DatanodeRegistration(127.0.0.1:35415, datanodeUuid=af8a4047-b43f-4e4a-a3ed-218f5a14e69e, infoPort=35427, infoSecurePort=0, ipcPort=39837, storageInfo=lv=-57;cid=testClusterID;nsid=348972446;c=1733709078255), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-09T01:51:20,188 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3344fc08e5d600ec with lease ID 0x6e4e85723dc6a5cc: Processing first storage report for DS-713922a2-6e91-4c11-b1c8-464cc610afc0 from datanode DatanodeRegistration(127.0.0.1:40555, datanodeUuid=eeced6e1-2a67-42be-81fd-fc091dd60af3, infoPort=38907, infoSecurePort=0, ipcPort=40943, storageInfo=lv=-57;cid=testClusterID;nsid=348972446;c=1733709078255) 2024-12-09T01:51:20,188 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3344fc08e5d600ec with lease ID 0x6e4e85723dc6a5cc: from storage DS-713922a2-6e91-4c11-b1c8-464cc610afc0 node DatanodeRegistration(127.0.0.1:40555, datanodeUuid=eeced6e1-2a67-42be-81fd-fc091dd60af3, infoPort=38907, infoSecurePort=0, ipcPort=40943, storageInfo=lv=-57;cid=testClusterID;nsid=348972446;c=1733709078255), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T01:51:20,188 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d14185ead5a5601 with lease ID 0x6e4e85723dc6a5cd: Processing first storage report for DS-c53bbec3-a9c4-4bde-bb27-9c597a984b3b from datanode DatanodeRegistration(127.0.0.1:35415, datanodeUuid=af8a4047-b43f-4e4a-a3ed-218f5a14e69e, infoPort=35427, infoSecurePort=0, ipcPort=39837, storageInfo=lv=-57;cid=testClusterID;nsid=348972446;c=1733709078255) 2024-12-09T01:51:20,189 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d14185ead5a5601 with lease ID 
0x6e4e85723dc6a5cd: from storage DS-c53bbec3-a9c4-4bde-bb27-9c597a984b3b node DatanodeRegistration(127.0.0.1:35415, datanodeUuid=af8a4047-b43f-4e4a-a3ed-218f5a14e69e, infoPort=35427, infoSecurePort=0, ipcPort=39837, storageInfo=lv=-57;cid=testClusterID;nsid=348972446;c=1733709078255), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:20,189 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3344fc08e5d600ec with lease ID 0x6e4e85723dc6a5cc: Processing first storage report for DS-a51284fc-9ca2-4b97-9c40-305d3d24b493 from datanode DatanodeRegistration(127.0.0.1:40555, datanodeUuid=eeced6e1-2a67-42be-81fd-fc091dd60af3, infoPort=38907, infoSecurePort=0, ipcPort=40943, storageInfo=lv=-57;cid=testClusterID;nsid=348972446;c=1733709078255) 2024-12-09T01:51:20,189 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3344fc08e5d600ec with lease ID 0x6e4e85723dc6a5cc: from storage DS-a51284fc-9ca2-4b97-9c40-305d3d24b493 node DatanodeRegistration(127.0.0.1:40555, datanodeUuid=eeced6e1-2a67-42be-81fd-fc091dd60af3, infoPort=38907, infoSecurePort=0, ipcPort=40943, storageInfo=lv=-57;cid=testClusterID;nsid=348972446;c=1733709078255), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:20,262 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/data/data5/current/BP-559585578-172.17.0.2-1733709078255/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:20,264 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/data/data6/current/BP-559585578-172.17.0.2-1733709078255/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:20,293 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:51:20,300 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4b877e20ac89082f with lease ID 0x6e4e85723dc6a5ce: Processing first storage report for DS-2705aa78-68cb-4415-a347-4b6277a1d45b from datanode DatanodeRegistration(127.0.0.1:46601, datanodeUuid=52791522-8ac7-4c11-95a1-5eccf43a6261, infoPort=39577, infoSecurePort=0, ipcPort=41599, storageInfo=lv=-57;cid=testClusterID;nsid=348972446;c=1733709078255) 2024-12-09T01:51:20,300 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b877e20ac89082f with lease ID 0x6e4e85723dc6a5ce: from storage DS-2705aa78-68cb-4415-a347-4b6277a1d45b node DatanodeRegistration(127.0.0.1:46601, datanodeUuid=52791522-8ac7-4c11-95a1-5eccf43a6261, infoPort=39577, infoSecurePort=0, ipcPort=41599, storageInfo=lv=-57;cid=testClusterID;nsid=348972446;c=1733709078255), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T01:51:20,300 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4b877e20ac89082f with lease ID 0x6e4e85723dc6a5ce: Processing first storage report for DS-fee4e6e1-e916-41d2-bcc0-17214585b4d2 from datanode DatanodeRegistration(127.0.0.1:46601, datanodeUuid=52791522-8ac7-4c11-95a1-5eccf43a6261, infoPort=39577, infoSecurePort=0, ipcPort=41599, storageInfo=lv=-57;cid=testClusterID;nsid=348972446;c=1733709078255) 2024-12-09T01:51:20,301 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b877e20ac89082f with lease ID 0x6e4e85723dc6a5ce: from storage DS-fee4e6e1-e916-41d2-bcc0-17214585b4d2 node DatanodeRegistration(127.0.0.1:46601, datanodeUuid=52791522-8ac7-4c11-95a1-5eccf43a6261, infoPort=39577, infoSecurePort=0, ipcPort=41599, storageInfo=lv=-57;cid=testClusterID;nsid=348972446;c=1733709078255), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:20,617 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80 2024-12-09T01:51:20,701 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-09T01:51:20,759 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=158, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=255, ProcessCount=11, AvailableMemoryMB=12031 2024-12-09T01:51:20,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T01:51:20,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-09T01:51:20,856 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/zookeeper_0, clientPort=49937, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T01:51:20,866 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49937 2024-12-09T01:51:20,881 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:20,885 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:21,015 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:21,016 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:21,069 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2078043917_22 at /127.0.0.1:49204 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:35415:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49204 dst: /127.0.0.1:35415 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:21,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-09T01:51:21,493 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T01:51:21,503 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3 with version=8 2024-12-09T01:51:21,503 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/hbase-staging 2024-12-09T01:51:21,601 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-09T01:51:21,860 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:51:21,871 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:21,871 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:21,876 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:51:21,876 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:21,876 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:51:22,012 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T01:51:22,075 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T01:51:22,084 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T01:51:22,088 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:51:22,115 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 8404 (auto-detected) 2024-12-09T01:51:22,117 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-09T01:51:22,135 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34719 2024-12-09T01:51:22,157 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34719 connecting to ZooKeeper ensemble=127.0.0.1:49937 2024-12-09T01:51:22,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:347190x0, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:51:22,193 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34719-0x1007478e1e60000 connected 2024-12-09T01:51:22,223 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:22,226 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:22,237 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:22,242 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3, hbase.cluster.distributed=false 2024-12-09T01:51:22,266 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:51:22,271 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34719 2024-12-09T01:51:22,272 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34719 2024-12-09T01:51:22,274 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34719 2024-12-09T01:51:22,274 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34719 2024-12-09T01:51:22,275 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34719 2024-12-09T01:51:22,393 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:51:22,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:22,395 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:22,396 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:51:22,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:22,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:51:22,399 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:51:22,403 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:51:22,404 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35707 2024-12-09T01:51:22,407 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35707 connecting to ZooKeeper ensemble=127.0.0.1:49937 2024-12-09T01:51:22,408 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:22,412 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:22,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:357070x0, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:51:22,418 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35707-0x1007478e1e60001 connected 2024-12-09T01:51:22,419 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:22,424 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T01:51:22,433 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:51:22,436 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T01:51:22,441 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:51:22,442 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35707 2024-12-09T01:51:22,442 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=35707 2024-12-09T01:51:22,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35707 2024-12-09T01:51:22,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35707 2024-12-09T01:51:22,444 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35707 2024-12-09T01:51:22,462 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:51:22,462 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:22,462 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:22,463 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:51:22,463 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:22,463 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:51:22,464 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:51:22,464 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:51:22,465 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37065 2024-12-09T01:51:22,467 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37065 connecting to ZooKeeper ensemble=127.0.0.1:49937 2024-12-09T01:51:22,469 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:22,472 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:22,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:370650x0, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:51:22,479 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37065-0x1007478e1e60002 connected 2024-12-09T01:51:22,480 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:22,480 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-12-09T01:51:22,484 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:51:22,485 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T01:51:22,487 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:51:22,487 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37065 2024-12-09T01:51:22,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37065 2024-12-09T01:51:22,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37065 2024-12-09T01:51:22,493 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37065 2024-12-09T01:51:22,493 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37065 2024-12-09T01:51:22,511 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:51:22,511 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:22,512 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:22,512 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:51:22,512 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:22,512 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:51:22,512 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:51:22,512 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:51:22,513 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41167 2024-12-09T01:51:22,515 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41167 connecting to ZooKeeper ensemble=127.0.0.1:49937 2024-12-09T01:51:22,516 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:22,518 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:22,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:411670x0, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:51:22,525 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41167-0x1007478e1e60003 connected 2024-12-09T01:51:22,525 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:22,526 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T01:51:22,528 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:51:22,529 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T01:51:22,531 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:51:22,532 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41167 2024-12-09T01:51:22,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41167 2024-12-09T01:51:22,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41167 2024-12-09T01:51:22,537 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41167 2024-12-09T01:51:22,539 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41167 2024-12-09T01:51:22,553 DEBUG [M:0;ef6f18c58dc9:34719 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef6f18c58dc9:34719 2024-12-09T01:51:22,555 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef6f18c58dc9,34719,1733709081654 2024-12-09T01:51:22,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:22,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:22,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
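The RpcExecutor lines above record how each mini-cluster region server sizes its call queues: three handlers draining a single LinkedBlockingQueue capped at 30 calls for the default pool, a priority pool split into one write handler and two read handlers, and dedicated replication and meta-priority pools, all started against the server's RPC port. As a rough sketch only, the snippet below shows the standard HBase settings that normally produce numbers like these; the keys and values are assumptions for illustration, not values read from this test's configuration, so verify them against your HBase version. In a real deployment they would live in hbase-site.xml rather than be set in code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class RpcQueueSizingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Three handlers per executor, matching the handlerCount=3 lines above.
    conf.setInt("hbase.regionserver.handler.count", 3);
    // The call-queue cap usually defaults to 10 calls per handler, which is
    // where maxQueueLength=30 comes from; set it explicitly to make that visible.
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
    // A non-zero read ratio splits an executor into separate read and write
    // handler pools, the same shape as "writeQueues=1 writeHandlers=1
    // readQueues=1 readHandlers=2" above.
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
    System.out.println("handler count = "
        + conf.getInt("hbase.regionserver.handler.count", -1));
  }
}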
2024-12-09T01:51:22,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:22,565 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef6f18c58dc9,34719,1733709081654 2024-12-09T01:51:22,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T01:51:22,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T01:51:22,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:22,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T01:51:22,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:22,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:22,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:22,588 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T01:51:22,589 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef6f18c58dc9,34719,1733709081654 from backup master directory 2024-12-09T01:51:22,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:22,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:22,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/backup-masters/ef6f18c58dc9,34719,1733709081654 2024-12-09T01:51:22,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:22,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:22,594 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T01:51:22,594 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef6f18c58dc9,34719,1733709081654 2024-12-09T01:51:22,597 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T01:51:22,599 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T01:51:22,660 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/hbase.id] with ID: 2c4a86d9-d0c3-48a2-8f3e-d1a975e4d197 2024-12-09T01:51:22,660 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/.tmp/hbase.id 2024-12-09T01:51:22,668 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:22,668 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:22,671 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2078043917_22 at /127.0.0.1:34330 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:46601:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34330 dst: /127.0.0.1:46601 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:22,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-09T01:51:22,678 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T01:51:22,678 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/.tmp/hbase.id]:[hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/hbase.id] 2024-12-09T01:51:22,724 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:22,729 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T01:51:22,748 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-12-09T01:51:22,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:22,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:22,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:22,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:22,764 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:22,764 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:22,767 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2078043917_22 at /127.0.0.1:59238 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:35415:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59238 dst: /127.0.0.1:35415 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:22,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-09T01:51:22,773 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
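The DFSStripedOutputStream warnings above follow from the erasure coding policy in use: RS-3-2-1024k is Reed-Solomon with 3 data units and 2 parity units, so a complete block group wants five distinct datanodes, while only three (127.0.0.1:46601, 127.0.0.1:35415 and 127.0.0.1:40555) appear anywhere in this capture. The two parity blocks at indices 3 and 4 therefore cannot be placed, the abandoned streamers surface as the DataXceiver 'Premature EOF' errors, and each of these small writes ends with the 'Block group <1> failed to write 2 blocks' warning. Besides the 'hdfs ec -verifyClusterSetup' check that the warning itself suggests, the directory's policy can be inspected programmatically; the sketch below assumes the path lives on HDFS and uses a placeholder path and a stock Configuration, neither taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public final class EcPolicyCheck {
  public static void main(String[] args) throws Exception {
    // Placeholder path and stock configuration; point these at the cluster
    // and directory you actually want to inspect.
    Path dir = new Path("hdfs://localhost:41875/user/jenkins/test-data");
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(dir.toUri(), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      if (policy == null) {
        System.out.println(dir + " uses plain replication");
      } else {
        // RS-3-2-1024k reports 3 data units + 2 parity units, i.e. a block
        // group that wants five datanodes, two more than the three visible here.
        int groupWidth = policy.getNumDataUnits() + policy.getNumParityUnits();
        System.out.println(dir + " uses " + policy.getName()
            + ", block group width " + groupWidth);
      }
    }
  }
}

Adding datanodes or writing the test data with plain replication should make the shortfall, and with it these warnings, disappear; which is appropriate depends on what the test intends to exercise.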
2024-12-09T01:51:22,789 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:51:22,791 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T01:51:22,798 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T01:51:22,827 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:22,827 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:22,830 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2078043917_22 at /127.0.0.1:59258 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:35415:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59258 dst: /127.0.0.1:35415 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:22,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-09T01:51:22,838 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T01:51:22,858 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store 2024-12-09T01:51:22,876 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:22,876 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:22,882 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2078043917_22 at /127.0.0.1:59272 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35415:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59272 dst: /127.0.0.1:35415 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:22,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-09T01:51:22,889 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T01:51:22,894 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-09T01:51:22,897 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:22,898 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:51:22,898 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:22,898 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:22,900 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-09T01:51:22,900 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:22,900 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:22,901 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709082898Disabling compacts and flushes for region at 1733709082898Disabling writes for close at 1733709082900 (+2 ms)Writing region close event to WAL at 1733709082900Closed at 1733709082900 2024-12-09T01:51:22,903 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/.initializing 2024-12-09T01:51:22,903 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/WALs/ef6f18c58dc9,34719,1733709081654 2024-12-09T01:51:22,912 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T01:51:22,928 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C34719%2C1733709081654, suffix=, logDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/WALs/ef6f18c58dc9,34719,1733709081654, archiveDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/oldWALs, maxLogs=10 2024-12-09T01:51:22,965 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/WALs/ef6f18c58dc9,34719,1733709081654/ef6f18c58dc9%2C34719%2C1733709081654.1733709082933, exclude list is [], retry=0 2024-12-09T01:51:22,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:22,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40555,DS-713922a2-6e91-4c11-b1c8-464cc610afc0,DISK] 2024-12-09T01:51:22,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46601,DS-2705aa78-68cb-4415-a347-4b6277a1d45b,DISK] 2024-12-09T01:51:22,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35415,DS-26a06a3a-54a9-41d5-830b-1d5e13bbbc7f,DISK] 2024-12-09T01:51:22,990 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-09T01:51:23,030 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/WALs/ef6f18c58dc9,34719,1733709081654/ef6f18c58dc9%2C34719%2C1733709081654.1733709082933 2024-12-09T01:51:23,031 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39577:39577),(127.0.0.1/127.0.0.1:38907:38907),(127.0.0.1/127.0.0.1:35427:35427)] 2024-12-09T01:51:23,031 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:51:23,032 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:23,035 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:23,036 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:23,079 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:23,108 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T01:51:23,112 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:23,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:23,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:23,119 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T01:51:23,119 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:23,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:51:23,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:23,124 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T01:51:23,125 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:23,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:51:23,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:23,129 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T01:51:23,129 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:23,130 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:51:23,131 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:23,134 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:23,135 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:23,141 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:23,141 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:23,145 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T01:51:23,148 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:23,154 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:51:23,156 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72435169, jitterRate=0.07936812937259674}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T01:51:23,163 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733709083049Initializing all the Stores at 1733709083052 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709083052Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709083053 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709083053Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709083053Cleaning up temporary data from old regions at 1733709083141 (+88 ms)Region opened successfully at 1733709083163 (+22 ms) 2024-12-09T01:51:23,164 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T01:51:23,201 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74086339, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:51:23,234 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T01:51:23,246 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T01:51:23,246 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T01:51:23,249 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T01:51:23,250 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T01:51:23,256 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-09T01:51:23,256 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T01:51:23,283 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T01:51:23,294 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T01:51:23,297 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T01:51:23,299 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T01:51:23,301 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T01:51:23,302 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T01:51:23,305 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T01:51:23,309 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T01:51:23,311 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T01:51:23,313 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T01:51:23,315 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T01:51:23,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-09T01:51:23,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40555 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-09T01:51:23,334 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T01:51:23,336 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T01:51:23,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:23,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:23,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:23,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:23,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:23,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:23,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:23,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:23,344 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef6f18c58dc9,34719,1733709081654, sessionid=0x1007478e1e60000, setting cluster-up flag (Was=false) 2024-12-09T01:51:23,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:23,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, 
quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:23,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:23,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:23,363 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T01:51:23,365 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,34719,1733709081654 2024-12-09T01:51:23,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:23,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:23,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:23,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:23,378 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T01:51:23,379 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,34719,1733709081654 2024-12-09T01:51:23,386 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T01:51:23,443 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.HRegionServer(746): ClusterId : 2c4a86d9-d0c3-48a2-8f3e-d1a975e4d197 2024-12-09T01:51:23,443 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(746): ClusterId : 2c4a86d9-d0c3-48a2-8f3e-d1a975e4d197 2024-12-09T01:51:23,443 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(746): ClusterId : 2c4a86d9-d0c3-48a2-8f3e-d1a975e4d197 2024-12-09T01:51:23,447 DEBUG [RS:0;ef6f18c58dc9:35707 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:51:23,447 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:51:23,447 DEBUG 
[RS:2;ef6f18c58dc9:41167 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:51:23,453 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:51:23,453 DEBUG [RS:2;ef6f18c58dc9:41167 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:51:23,453 DEBUG [RS:0;ef6f18c58dc9:35707 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:51:23,453 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:51:23,453 DEBUG [RS:2;ef6f18c58dc9:41167 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:51:23,453 DEBUG [RS:0;ef6f18c58dc9:35707 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:51:23,457 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T01:51:23,457 DEBUG [RS:2;ef6f18c58dc9:41167 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T01:51:23,457 DEBUG [RS:0;ef6f18c58dc9:35707 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T01:51:23,458 DEBUG [RS:1;ef6f18c58dc9:37065 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d39603b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:51:23,458 DEBUG [RS:2;ef6f18c58dc9:41167 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4197aaa6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:51:23,458 DEBUG [RS:0;ef6f18c58dc9:35707 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6897be97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:51:23,471 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T01:51:23,478 DEBUG [RS:0;ef6f18c58dc9:35707 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef6f18c58dc9:35707 2024-12-09T01:51:23,479 DEBUG [RS:1;ef6f18c58dc9:37065 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;ef6f18c58dc9:37065 2024-12-09T01:51:23,482 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T01:51:23,482 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T01:51:23,482 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T01:51:23,482 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.RegionServerCoprocessorHost(67): Table 
coprocessor loading is enabled 2024-12-09T01:51:23,482 DEBUG [RS:1;ef6f18c58dc9:37065 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T01:51:23,482 DEBUG [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T01:51:23,483 DEBUG [RS:2;ef6f18c58dc9:41167 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;ef6f18c58dc9:41167 2024-12-09T01:51:23,483 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T01:51:23,483 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T01:51:23,483 DEBUG [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T01:51:23,484 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T01:51:23,485 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,34719,1733709081654 with port=35707, startcode=1733709082350 2024-12-09T01:51:23,485 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,34719,1733709081654 with port=37065, startcode=1733709082461 2024-12-09T01:51:23,485 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,34719,1733709081654 with port=41167, startcode=1733709082511 2024-12-09T01:51:23,493 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-09T01:51:23,499 DEBUG [RS:1;ef6f18c58dc9:37065 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:51:23,499 DEBUG [RS:2;ef6f18c58dc9:41167 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:51:23,499 DEBUG [RS:0;ef6f18c58dc9:35707 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:51:23,500 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef6f18c58dc9,34719,1733709081654 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T01:51:23,509 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:51:23,509 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:51:23,509 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:51:23,509 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:51:23,509 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef6f18c58dc9:0, corePoolSize=10, maxPoolSize=10 2024-12-09T01:51:23,510 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,510 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:51:23,510 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,520 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733709113520 2024-12-09T01:51:23,522 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T01:51:23,523 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T01:51:23,527 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T01:51:23,528 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T01:51:23,528 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T01:51:23,528 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T01:51:23,530 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,536 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T01:51:23,537 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T01:51:23,537 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T01:51:23,539 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:51:23,541 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T01:51:23,543 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T01:51:23,544 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T01:51:23,547 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709083545,5,FailOnTimeoutGroup] 2024-12-09T01:51:23,547 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:23,548 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T01:51:23,548 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709083547,5,FailOnTimeoutGroup] 2024-12-09T01:51:23,548 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,548 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T01:51:23,550 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,554 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58469, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:51:23,554 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34437, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:51:23,554 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53733, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:51:23,556 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,566 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:23,566 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34719 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,37065,1733709082461 2024-12-09T01:51:23,566 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T01:51:23,569 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34719 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,37065,1733709082461 2024-12-09T01:51:23,581 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34719 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,35707,1733709082350 2024-12-09T01:51:23,582 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34719 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,35707,1733709082350 2024-12-09T01:51:23,582 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2078043917_22 at /127.0.0.1:38774 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:40555:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38774 dst: /127.0.0.1:40555 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T01:51:23,586 DEBUG [RS:1;ef6f18c58dc9:37065 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3 2024-12-09T01:51:23,587 DEBUG [RS:1;ef6f18c58dc9:37065 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41875 2024-12-09T01:51:23,587 DEBUG [RS:1;ef6f18c58dc9:37065 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:51:23,588 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34719 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,41167,1733709082511 2024-12-09T01:51:23,588 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34719 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,41167,1733709082511 2024-12-09T01:51:23,588 DEBUG [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3 2024-12-09T01:51:23,589 DEBUG [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41875 2024-12-09T01:51:23,589 DEBUG [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:51:23,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40555 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-09T01:51:23,592 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T01:51:23,594 DEBUG [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3 2024-12-09T01:51:23,594 DEBUG [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41875 2024-12-09T01:51:23,594 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T01:51:23,594 DEBUG [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:51:23,595 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3 2024-12-09T01:51:23,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:51:23,599 DEBUG [RS:0;ef6f18c58dc9:35707 {}] zookeeper.ZKUtil(111): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,35707,1733709082350 2024-12-09T01:51:23,599 WARN [RS:0;ef6f18c58dc9:35707 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T01:51:23,599 INFO [RS:0;ef6f18c58dc9:35707 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T01:51:23,599 DEBUG [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,35707,1733709082350 2024-12-09T01:51:23,600 DEBUG [RS:1;ef6f18c58dc9:37065 {}] zookeeper.ZKUtil(111): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,37065,1733709082461 2024-12-09T01:51:23,600 DEBUG [RS:2;ef6f18c58dc9:41167 {}] zookeeper.ZKUtil(111): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,41167,1733709082511 2024-12-09T01:51:23,600 WARN [RS:1;ef6f18c58dc9:37065 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T01:51:23,600 WARN [RS:2;ef6f18c58dc9:41167 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T01:51:23,600 INFO [RS:2;ef6f18c58dc9:41167 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T01:51:23,600 INFO [RS:1;ef6f18c58dc9:37065 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T01:51:23,600 DEBUG [RS:1;ef6f18c58dc9:37065 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,37065,1733709082461 2024-12-09T01:51:23,600 DEBUG [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,41167,1733709082511 2024-12-09T01:51:23,602 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,37065,1733709082461] 2024-12-09T01:51:23,602 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,41167,1733709082511] 2024-12-09T01:51:23,602 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,35707,1733709082350] 2024-12-09T01:51:23,613 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:23,613 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:23,625 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2078043917_22 at /127.0.0.1:34358 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:46601:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34358 dst: /127.0.0.1:46601 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:23,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-09T01:51:23,633 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T01:51:23,635 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:23,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:51:23,638 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:51:23,638 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:51:23,640 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:51:23,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:51:23,640 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:23,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:23,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:51:23,644 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, 
single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:51:23,644 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:23,645 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:23,645 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:51:23,647 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:51:23,647 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:23,648 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:23,649 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:51:23,654 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:51:23,654 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:23,655 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:23,655 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:51:23,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740 2024-12-09T01:51:23,658 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740 2024-12-09T01:51:23,661 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:51:23,661 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:51:23,662 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:51:23,662 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:51:23,662 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T01:51:23,665 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:51:23,670 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:51:23,674 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:51:23,675 INFO [RS:1;ef6f18c58dc9:37065 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:51:23,675 INFO [RS:2;ef6f18c58dc9:41167 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:51:23,675 INFO [RS:0;ef6f18c58dc9:35707 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:51:23,675 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,675 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,675 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T01:51:23,676 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68633348, jitterRate=0.02271658182144165}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T01:51:23,677 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:51:23,677 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:51:23,677 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:51:23,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733709083635Initializing all the Stores at 1733709083637 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709083637Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709083637Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709083637Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709083637Cleaning up temporary data from old regions at 1733709083661 (+24 ms)Region opened successfully at 1733709083678 (+17 ms) 2024-12-09T01:51:23,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:51:23,679 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:51:23,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:51:23,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:51:23,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:51:23,681 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:51:23,681 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709083679Disabling compacts and 
flushes for region at 1733709083679Disabling writes for close at 1733709083679Writing region close event to WAL at 1733709083681 (+2 ms)Closed at 1733709083681 2024-12-09T01:51:23,684 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:51:23,684 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:51:23,685 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:51:23,686 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:51:23,686 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T01:51:23,686 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,686 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,686 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,686 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,686 DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,686 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,686 DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,686 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,687 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,687 DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,687 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,687 DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,687 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,687 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,687 
DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,687 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,687 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,687 DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:51:23,687 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:51:23,687 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,687 DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,687 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,688 DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,688 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:51:23,688 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,688 DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,688 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,688 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,688 DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,688 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,688 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,688 DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,688 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,688 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,689 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,689 DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,689 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,689 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:23,689 DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:23,689 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,689 DEBUG [RS:1;ef6f18c58dc9:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:23,689 DEBUG [RS:0;ef6f18c58dc9:35707 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:23,689 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:23,689 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:23,690 DEBUG [RS:2;ef6f18c58dc9:41167 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:23,692 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,692 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,692 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,693 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-09T01:51:23,693 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,693 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,37065,1733709082461-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:51:23,694 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T01:51:23,696 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,696 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,696 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,696 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,696 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,696 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,35707,1733709082350-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:51:23,701 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,701 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,701 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,701 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,701 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,702 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,41167,1733709082511-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-09T01:51:23,705 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:51:23,711 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T01:51:23,726 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:51:23,727 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:51:23,728 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:51:23,729 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,35707,1733709082350-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,729 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,37065,1733709082461-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,729 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,41167,1733709082511-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,729 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,729 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,729 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,729 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.Replication(171): ef6f18c58dc9,35707,1733709082350 started 2024-12-09T01:51:23,729 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.Replication(171): ef6f18c58dc9,41167,1733709082511 started 2024-12-09T01:51:23,729 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.Replication(171): ef6f18c58dc9,37065,1733709082461 started 2024-12-09T01:51:23,753 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,754 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,35707,1733709082350, RpcServer on ef6f18c58dc9/172.17.0.2:35707, sessionid=0x1007478e1e60001 2024-12-09T01:51:23,754 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:23,754 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T01:51:23,754 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,37065,1733709082461, RpcServer on ef6f18c58dc9/172.17.0.2:37065, sessionid=0x1007478e1e60002 2024-12-09T01:51:23,754 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,41167,1733709082511, RpcServer on ef6f18c58dc9/172.17.0.2:41167, sessionid=0x1007478e1e60003 2024-12-09T01:51:23,755 DEBUG [RS:0;ef6f18c58dc9:35707 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:51:23,755 DEBUG [RS:2;ef6f18c58dc9:41167 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:51:23,755 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:51:23,755 DEBUG [RS:0;ef6f18c58dc9:35707 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,35707,1733709082350 2024-12-09T01:51:23,755 DEBUG [RS:1;ef6f18c58dc9:37065 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,37065,1733709082461 2024-12-09T01:51:23,755 DEBUG [RS:2;ef6f18c58dc9:41167 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,41167,1733709082511 2024-12-09T01:51:23,755 DEBUG [RS:0;ef6f18c58dc9:35707 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,35707,1733709082350' 2024-12-09T01:51:23,755 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,37065,1733709082461' 2024-12-09T01:51:23,755 DEBUG [RS:2;ef6f18c58dc9:41167 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,41167,1733709082511' 2024-12-09T01:51:23,756 DEBUG [RS:0;ef6f18c58dc9:35707 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:51:23,756 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:51:23,756 DEBUG [RS:2;ef6f18c58dc9:41167 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:51:23,757 DEBUG [RS:2;ef6f18c58dc9:41167 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:51:23,757 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:51:23,757 DEBUG [RS:0;ef6f18c58dc9:35707 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:51:23,758 DEBUG [RS:2;ef6f18c58dc9:41167 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:51:23,758 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:51:23,758 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:51:23,758 DEBUG [RS:2;ef6f18c58dc9:41167 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:51:23,758 DEBUG [RS:0;ef6f18c58dc9:35707 {}] 
procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:51:23,758 DEBUG [RS:1;ef6f18c58dc9:37065 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,37065,1733709082461 2024-12-09T01:51:23,758 DEBUG [RS:2;ef6f18c58dc9:41167 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,41167,1733709082511 2024-12-09T01:51:23,758 DEBUG [RS:0;ef6f18c58dc9:35707 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:51:23,758 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,37065,1733709082461' 2024-12-09T01:51:23,758 DEBUG [RS:2;ef6f18c58dc9:41167 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,41167,1733709082511' 2024-12-09T01:51:23,758 DEBUG [RS:0;ef6f18c58dc9:35707 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,35707,1733709082350 2024-12-09T01:51:23,758 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:51:23,758 DEBUG [RS:2;ef6f18c58dc9:41167 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:51:23,758 DEBUG [RS:0;ef6f18c58dc9:35707 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,35707,1733709082350' 2024-12-09T01:51:23,758 DEBUG [RS:0;ef6f18c58dc9:35707 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:51:23,759 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:51:23,759 DEBUG [RS:2;ef6f18c58dc9:41167 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:51:23,760 DEBUG [RS:0;ef6f18c58dc9:35707 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:51:23,760 DEBUG [RS:1;ef6f18c58dc9:37065 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:51:23,760 INFO [RS:1;ef6f18c58dc9:37065 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:51:23,760 INFO [RS:1;ef6f18c58dc9:37065 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T01:51:23,760 DEBUG [RS:2;ef6f18c58dc9:41167 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:51:23,760 INFO [RS:2;ef6f18c58dc9:41167 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:51:23,760 INFO [RS:2;ef6f18c58dc9:41167 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T01:51:23,761 DEBUG [RS:0;ef6f18c58dc9:35707 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:51:23,761 INFO [RS:0;ef6f18c58dc9:35707 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:51:23,761 INFO [RS:0;ef6f18c58dc9:35707 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
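Each procedure member above probes an abort znode and then lists children under an acquired znode, once for flush-table-proc and once for online-snapshot. A simplified stand-in for those two reads using the plain Apache ZooKeeper client, not the actual ZKProcedureMemberRpcs code; the quorum address and znode paths come from this log, while the session timeout and empty watcher are arbitrary assumptions:

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ZkProcedureCheck {
    public static void main(String[] args) throws Exception {
        // Quorum as reported by the ZKWatcher records in this log; 30s session timeout is arbitrary.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49937", 30_000, event -> { });

        // Probe the abort znode, then list any procedures waiting under the acquired znode.
        boolean abortNodePresent = zk.exists("/hbase/flush-table-proc/abort", false) != null;
        List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", false);

        System.out.println("abort node present: " + abortNodePresent + ", acquired: " + acquired);
        zk.close();
    }
}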
2024-12-09T01:51:23,862 WARN [ef6f18c58dc9:34719 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T01:51:23,866 INFO [RS:1;ef6f18c58dc9:37065 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T01:51:23,866 INFO [RS:2;ef6f18c58dc9:41167 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T01:51:23,866 INFO [RS:0;ef6f18c58dc9:35707 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T01:51:23,869 INFO [RS:2;ef6f18c58dc9:41167 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C41167%2C1733709082511, suffix=, logDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,41167,1733709082511, archiveDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/oldWALs, maxLogs=32 2024-12-09T01:51:23,869 INFO [RS:0;ef6f18c58dc9:35707 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C35707%2C1733709082350, suffix=, logDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,35707,1733709082350, archiveDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/oldWALs, maxLogs=32 2024-12-09T01:51:23,869 INFO [RS:1;ef6f18c58dc9:37065 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C37065%2C1733709082461, suffix=, logDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,37065,1733709082461, archiveDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/oldWALs, maxLogs=32 2024-12-09T01:51:23,889 DEBUG [RS:2;ef6f18c58dc9:41167 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,41167,1733709082511/ef6f18c58dc9%2C41167%2C1733709082511.1733709083875, exclude list is [], retry=0 2024-12-09T01:51:23,889 DEBUG [RS:1;ef6f18c58dc9:37065 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,37065,1733709082461/ef6f18c58dc9%2C37065%2C1733709082461.1733709083875, exclude list is [], retry=0 2024-12-09T01:51:23,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35415,DS-26a06a3a-54a9-41d5-830b-1d5e13bbbc7f,DISK] 2024-12-09T01:51:23,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46601,DS-2705aa78-68cb-4415-a347-4b6277a1d45b,DISK] 2024-12-09T01:51:23,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46601,DS-2705aa78-68cb-4415-a347-4b6277a1d45b,DISK] 2024-12-09T01:51:23,896 DEBUG [RS:0;ef6f18c58dc9:35707 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,35707,1733709082350/ef6f18c58dc9%2C35707%2C1733709082350.1733709083875, exclude list is [], retry=0 2024-12-09T01:51:23,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40555,DS-713922a2-6e91-4c11-b1c8-464cc610afc0,DISK] 2024-12-09T01:51:23,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40555,DS-713922a2-6e91-4c11-b1c8-464cc610afc0,DISK] 2024-12-09T01:51:23,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35415,DS-26a06a3a-54a9-41d5-830b-1d5e13bbbc7f,DISK] 2024-12-09T01:51:23,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46601,DS-2705aa78-68cb-4415-a347-4b6277a1d45b,DISK] 2024-12-09T01:51:23,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40555,DS-713922a2-6e91-4c11-b1c8-464cc610afc0,DISK] 2024-12-09T01:51:23,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35415,DS-26a06a3a-54a9-41d5-830b-1d5e13bbbc7f,DISK] 2024-12-09T01:51:23,924 INFO [RS:2;ef6f18c58dc9:41167 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,41167,1733709082511/ef6f18c58dc9%2C41167%2C1733709082511.1733709083875 2024-12-09T01:51:23,925 DEBUG [RS:2;ef6f18c58dc9:41167 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39577:39577),(127.0.0.1/127.0.0.1:38907:38907),(127.0.0.1/127.0.0.1:35427:35427)] 2024-12-09T01:51:23,926 INFO [RS:1;ef6f18c58dc9:37065 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,37065,1733709082461/ef6f18c58dc9%2C37065%2C1733709082461.1733709083875 2024-12-09T01:51:23,928 DEBUG [RS:1;ef6f18c58dc9:37065 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35427:35427),(127.0.0.1/127.0.0.1:39577:39577),(127.0.0.1/127.0.0.1:38907:38907)] 2024-12-09T01:51:23,932 INFO [RS:0;ef6f18c58dc9:35707 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,35707,1733709082350/ef6f18c58dc9%2C35707%2C1733709082350.1733709083875 2024-12-09T01:51:23,936 DEBUG [RS:0;ef6f18c58dc9:35707 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:39577:39577),(127.0.0.1/127.0.0.1:35427:35427),(127.0.0.1/127.0.0.1:38907:38907)] 2024-12-09T01:51:24,115 DEBUG [ef6f18c58dc9:34719 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T01:51:24,123 DEBUG [ef6f18c58dc9:34719 {}] balancer.BalancerClusterState(204): Hosts are {ef6f18c58dc9=0} racks are {/default-rack=0} 2024-12-09T01:51:24,130 DEBUG [ef6f18c58dc9:34719 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T01:51:24,130 DEBUG [ef6f18c58dc9:34719 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T01:51:24,130 DEBUG [ef6f18c58dc9:34719 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T01:51:24,130 DEBUG [ef6f18c58dc9:34719 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T01:51:24,130 DEBUG [ef6f18c58dc9:34719 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T01:51:24,130 DEBUG [ef6f18c58dc9:34719 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T01:51:24,130 INFO [ef6f18c58dc9:34719 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T01:51:24,130 INFO [ef6f18c58dc9:34719 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T01:51:24,130 INFO [ef6f18c58dc9:34719 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T01:51:24,130 DEBUG [ef6f18c58dc9:34719 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T01:51:24,138 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef6f18c58dc9,41167,1733709082511 2024-12-09T01:51:24,145 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,41167,1733709082511, state=OPENING 2024-12-09T01:51:24,151 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T01:51:24,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:24,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:24,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:24,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:24,155 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:24,155 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:24,155 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: 
CHANGED 2024-12-09T01:51:24,155 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:24,157 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:51:24,159 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,41167,1733709082511}] 2024-12-09T01:51:24,337 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T01:51:24,340 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41213, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T01:51:24,353 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T01:51:24,354 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T01:51:24,355 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T01:51:24,358 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C41167%2C1733709082511.meta, suffix=.meta, logDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,41167,1733709082511, archiveDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/oldWALs, maxLogs=32 2024-12-09T01:51:24,374 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,41167,1733709082511/ef6f18c58dc9%2C41167%2C1733709082511.meta.1733709084360.meta, exclude list is [], retry=0 2024-12-09T01:51:24,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40555,DS-713922a2-6e91-4c11-b1c8-464cc610afc0,DISK] 2024-12-09T01:51:24,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46601,DS-2705aa78-68cb-4415-a347-4b6277a1d45b,DISK] 2024-12-09T01:51:24,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35415,DS-26a06a3a-54a9-41d5-830b-1d5e13bbbc7f,DISK] 2024-12-09T01:51:24,382 INFO 
[RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/WALs/ef6f18c58dc9,41167,1733709082511/ef6f18c58dc9%2C41167%2C1733709082511.meta.1733709084360.meta 2024-12-09T01:51:24,382 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38907:38907),(127.0.0.1/127.0.0.1:39577:39577),(127.0.0.1/127.0.0.1:35427:35427)] 2024-12-09T01:51:24,383 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:51:24,384 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T01:51:24,387 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T01:51:24,392 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T01:51:24,397 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T01:51:24,397 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:24,397 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T01:51:24,397 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T01:51:24,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:51:24,402 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:51:24,403 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:24,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:24,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:51:24,406 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:51:24,406 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:24,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:24,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:51:24,409 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:51:24,409 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:24,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T01:51:24,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:51:24,411 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:51:24,411 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:24,412 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:24,412 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:51:24,414 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740 2024-12-09T01:51:24,417 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740 2024-12-09T01:51:24,419 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:51:24,419 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:51:24,420 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
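The StoreOpener records above create the hbase:meta stores with ROW_INDEX_V1 encoding and no compression, and the region open journal a few records below echoes the full family descriptors (ROWCOL bloom filter, IN_MEMORY true, 8 KB blocks for 'info'). The same settings can be expressed with the public descriptor builder; this is a sketch for illustration only, since the hbase:meta schema is defined inside HBase rather than by client code:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaInfoFamilySketch {
    public static void main(String[] args) {
        // Mirrors the 'info' family as printed in the region open journal:
        // ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks, 3 versions, no compression.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .setMaxVersions(3)
            .build();
        System.out.println(info);
    }
}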
2024-12-09T01:51:24,423 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:51:24,424 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60796171, jitterRate=-0.09406645596027374}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T01:51:24,424 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T01:51:24,426 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733709084398Writing region info on filesystem at 1733709084398Initializing all the Stores at 1733709084400 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709084400Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709084400Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709084400Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709084400Cleaning up temporary data from old regions at 1733709084419 (+19 ms)Running coprocessor post-open hooks at 1733709084424 (+5 ms)Region opened successfully at 1733709084426 (+2 ms) 2024-12-09T01:51:24,434 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733709084328 2024-12-09T01:51:24,446 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T01:51:24,446 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T01:51:24,448 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=ef6f18c58dc9,41167,1733709082511 2024-12-09T01:51:24,451 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,41167,1733709082511, state=OPEN 2024-12-09T01:51:24,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:51:24,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:51:24,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:51:24,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:51:24,453 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:24,453 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:24,453 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:24,453 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:24,454 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,41167,1733709082511 2024-12-09T01:51:24,461 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T01:51:24,461 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,41167,1733709082511 in 295 msec 2024-12-09T01:51:24,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T01:51:24,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 769 msec 2024-12-09T01:51:24,470 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:51:24,471 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T01:51:24,492 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:51:24,494 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=ef6f18c58dc9,41167,1733709082511, seqNum=-1] 2024-12-09T01:51:24,515 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:51:24,518 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42403, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:51:24,538 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1190 sec 2024-12-09T01:51:24,539 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733709084538, completionTime=-1 2024-12-09T01:51:24,541 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T01:51:24,542 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T01:51:24,576 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T01:51:24,576 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733709144576 2024-12-09T01:51:24,576 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733709204576 2024-12-09T01:51:24,576 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 34 msec 2024-12-09T01:51:24,578 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T01:51:24,585 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34719,1733709081654-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:24,585 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34719,1733709081654-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:24,586 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34719,1733709081654-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:24,587 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef6f18c58dc9:34719, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:24,588 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:24,588 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-09T01:51:24,594 DEBUG [master/ef6f18c58dc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T01:51:24,617 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.021sec 2024-12-09T01:51:24,618 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T01:51:24,619 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T01:51:24,621 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T01:51:24,621 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T01:51:24,621 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T01:51:24,622 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34719,1733709081654-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:51:24,623 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34719,1733709081654-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T01:51:24,627 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T01:51:24,628 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T01:51:24,629 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34719,1733709081654-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
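With the master initialized, the Time-limited test client below creates an RPC client, asks the connection registry for the cluster id, and resolves the hbase:meta location. From a client's point of view that roughly corresponds to the following sketch with the public API; the quorum and client port values are taken from this log and would normally come from hbase-site.xml:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "49937");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
            // Client-side equivalent of "Start fetching meta region location from registry".
            HRegionLocation loc = meta.getRegionLocation(Bytes.toBytes(""));
            System.out.println("hbase:meta is served by " + loc.getServerName());
        }
    }
}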
2024-12-09T01:51:24,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74d4950b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:51:24,660 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T01:51:24,660 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T01:51:24,686 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ef6f18c58dc9,34719,-1 for getting cluster id 2024-12-09T01:51:24,693 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T01:51:24,707 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2c4a86d9-d0c3-48a2-8f3e-d1a975e4d197' 2024-12-09T01:51:24,709 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T01:51:24,710 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2c4a86d9-d0c3-48a2-8f3e-d1a975e4d197" 2024-12-09T01:51:24,710 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1735b109, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:51:24,710 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ef6f18c58dc9,34719,-1] 2024-12-09T01:51:24,714 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T01:51:24,716 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:24,718 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58970, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T01:51:24,726 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70486ec5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:51:24,727 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:51:24,735 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,41167,1733709082511, seqNum=-1] 2024-12-09T01:51:24,736 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:51:24,739 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35966, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:51:24,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=ef6f18c58dc9,34719,1733709081654 2024-12-09T01:51:24,764 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T01:51:24,769 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is ef6f18c58dc9,34719,1733709081654 2024-12-09T01:51:24,771 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@33e7eef 2024-12-09T01:51:24,772 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T01:51:24,775 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58980, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T01:51:24,782 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:51:24,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-09T01:51:24,794 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T01:51:24,797 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-09T01:51:24,797 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:24,800 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T01:51:24,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:51:24,811 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:24,811 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
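The master RPC above records the client request to create 'TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single 'cf' family, which the master turns into a CreateTableProcedure (pid=4). A minimal sketch of issuing the same request through the public Admin API, assuming a reachable cluster configured via hbase-site.xml:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // expects hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Same shape as the logged request: one region replica, one family 'cf' with defaults.
            admin.createTable(TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                .setRegionReplication(1)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                .build());
        }
    }
}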
2024-12-09T01:51:24,817 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2078043917_22 at /127.0.0.1:38838 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:40555:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38838 dst: /127.0.0.1:40555
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T01:51:24,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40555 is added to blk_-9223372036854775680_1021 (size=392)
2024-12-09T01:51:24,826 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-09T01:51:24,829 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9668fa925f1116a6282af470b96a7ff0, NAME => 'TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3
2024-12-09T01:51:24,836 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T01:51:24,836 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
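The parity-block warnings above are a sizing problem: the RS-3-2-1024k policy writes 3 data plus 2 parity blocks per stripe, but this mini cluster only has three datanodes, so parity indexes 3 and 4 cannot be placed and the log itself points at 'hdfs ec -verifyClusterSetup'. A small sketch that inspects which erasure coding policy applies to the test data directory using the HDFS client API; the namenode address and path are copied from the WAL configuration records earlier in this log:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheck {
    public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(URI.create("hdfs://localhost:41875"), new Configuration());

        Path dir = new Path("/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3");
        ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
        if (policy == null) {
            System.out.println(dir + " uses plain replication (no EC policy set)");
        } else {
            // RS-3-2-1024k needs numDataUnits + numParityUnits = 5 datanodes for a full stripe.
            System.out.println(dir + " uses " + policy.getName() + ": " + policy.getNumDataUnits()
                + " data + " + policy.getNumParityUnits() + " parity units per stripe");
        }
    }
}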
2024-12-09T01:51:24,845 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2078043917_22 at /127.0.0.1:38856 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:40555:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38856 dst: /127.0.0.1:40555
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T01:51:24,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40555 is added to blk_-9223372036854775664_1023 (size=51)
2024-12-09T01:51:24,851 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-09T01:51:24,852 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T01:51:24,852 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 9668fa925f1116a6282af470b96a7ff0, disabling compactions & flushes
2024-12-09T01:51:24,852 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0.
2024-12-09T01:51:24,852 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0.
2024-12-09T01:51:24,852 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0. after waiting 0 ms
2024-12-09T01:51:24,852 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0.
2024-12-09T01:51:24,852 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0.
2024-12-09T01:51:24,852 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9668fa925f1116a6282af470b96a7ff0: Waiting for close lock at 1733709084852Disabling compacts and flushes for region at 1733709084852Disabling writes for close at 1733709084852Writing region close event to WAL at 1733709084852Closed at 1733709084852 2024-12-09T01:51:24,855 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T01:51:24,862 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733709084855"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733709084855"}]},"ts":"1733709084855"} 2024-12-09T01:51:24,869 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T01:51:24,871 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T01:51:24,874 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733709084871"}]},"ts":"1733709084871"} 2024-12-09T01:51:24,879 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-09T01:51:24,880 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {ef6f18c58dc9=0} racks are {/default-rack=0} 2024-12-09T01:51:24,881 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T01:51:24,881 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T01:51:24,881 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T01:51:24,881 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T01:51:24,881 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T01:51:24,881 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T01:51:24,881 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T01:51:24,881 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T01:51:24,881 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T01:51:24,881 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T01:51:24,883 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9668fa925f1116a6282af470b96a7ff0, ASSIGN}] 2024-12-09T01:51:24,885 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9668fa925f1116a6282af470b96a7ff0, ASSIGN 2024-12-09T01:51:24,888 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9668fa925f1116a6282af470b96a7ff0, ASSIGN; state=OFFLINE, location=ef6f18c58dc9,35707,1733709082350; forceNewPlan=false, retain=false 2024-12-09T01:51:24,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:51:25,040 INFO [ef6f18c58dc9:34719 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T01:51:25,041 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9668fa925f1116a6282af470b96a7ff0, regionState=OPENING, regionLocation=ef6f18c58dc9,35707,1733709082350 2024-12-09T01:51:25,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9668fa925f1116a6282af470b96a7ff0, ASSIGN because future has completed 2024-12-09T01:51:25,047 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9668fa925f1116a6282af470b96a7ff0, server=ef6f18c58dc9,35707,1733709082350}] 2024-12-09T01:51:25,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:51:25,201 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T01:51:25,204 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50081, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T01:51:25,210 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0. 
2024-12-09T01:51:25,210 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9668fa925f1116a6282af470b96a7ff0, NAME => 'TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:51:25,211 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 9668fa925f1116a6282af470b96a7ff0 2024-12-09T01:51:25,211 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:25,211 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9668fa925f1116a6282af470b96a7ff0 2024-12-09T01:51:25,211 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9668fa925f1116a6282af470b96a7ff0 2024-12-09T01:51:25,215 INFO [StoreOpener-9668fa925f1116a6282af470b96a7ff0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9668fa925f1116a6282af470b96a7ff0 2024-12-09T01:51:25,217 INFO [StoreOpener-9668fa925f1116a6282af470b96a7ff0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9668fa925f1116a6282af470b96a7ff0 columnFamilyName cf 2024-12-09T01:51:25,217 DEBUG [StoreOpener-9668fa925f1116a6282af470b96a7ff0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:25,218 INFO [StoreOpener-9668fa925f1116a6282af470b96a7ff0-1 {}] regionserver.HStore(327): Store=9668fa925f1116a6282af470b96a7ff0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:51:25,219 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9668fa925f1116a6282af470b96a7ff0 2024-12-09T01:51:25,220 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/default/TestHBaseWalOnEC/9668fa925f1116a6282af470b96a7ff0 2024-12-09T01:51:25,221 DEBUG 
[RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/default/TestHBaseWalOnEC/9668fa925f1116a6282af470b96a7ff0 2024-12-09T01:51:25,221 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9668fa925f1116a6282af470b96a7ff0 2024-12-09T01:51:25,221 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9668fa925f1116a6282af470b96a7ff0 2024-12-09T01:51:25,224 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9668fa925f1116a6282af470b96a7ff0 2024-12-09T01:51:25,229 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/default/TestHBaseWalOnEC/9668fa925f1116a6282af470b96a7ff0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:51:25,230 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9668fa925f1116a6282af470b96a7ff0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61700340, jitterRate=-0.0805932879447937}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T01:51:25,230 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9668fa925f1116a6282af470b96a7ff0 2024-12-09T01:51:25,232 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9668fa925f1116a6282af470b96a7ff0: Running coprocessor pre-open hook at 1733709085211Writing region info on filesystem at 1733709085211Initializing all the Stores at 1733709085213 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709085213Cleaning up temporary data from old regions at 1733709085222 (+9 ms)Running coprocessor post-open hooks at 1733709085231 (+9 ms)Region opened successfully at 1733709085232 (+1 ms) 2024-12-09T01:51:25,234 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0., pid=6, masterSystemTime=1733709085201 2024-12-09T01:51:25,237 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0. 2024-12-09T01:51:25,237 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0. 
2024-12-09T01:51:25,238 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9668fa925f1116a6282af470b96a7ff0, regionState=OPEN, openSeqNum=2, regionLocation=ef6f18c58dc9,35707,1733709082350 2024-12-09T01:51:25,242 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9668fa925f1116a6282af470b96a7ff0, server=ef6f18c58dc9,35707,1733709082350 because future has completed 2024-12-09T01:51:25,247 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T01:51:25,247 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9668fa925f1116a6282af470b96a7ff0, server=ef6f18c58dc9,35707,1733709082350 in 197 msec 2024-12-09T01:51:25,251 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T01:51:25,251 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9668fa925f1116a6282af470b96a7ff0, ASSIGN in 364 msec 2024-12-09T01:51:25,253 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T01:51:25,253 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733709085253"}]},"ts":"1733709085253"} 2024-12-09T01:51:25,256 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-09T01:51:25,258 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T01:51:25,262 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 474 msec 2024-12-09T01:51:25,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:51:25,433 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T01:51:25,433 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-09T01:51:25,434 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T01:51:25,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-09T01:51:25,440 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T01:51:25,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
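With the CREATE operation for default:TestHBaseWalOnEC now reported complete, the descriptor logged by CreateTableProcedure above amounts to a single column family 'cf' keeping one version, with no compression or block encoding. As a hedged sketch only (not the test's own code), an equivalent table could be created through the public Admin API as below; the table and family names come from the log, the connection boilerplate is assumed.

// Sketch: create a table equivalent to the descriptor logged by CreateTableProcedure.
// Table/family names come from the log; the surrounding setup is illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateWalOnEcTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptor desc = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                // Single family 'cf' keeping one version, matching the logged descriptor.
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("cf"))
                    .setMaxVersions(1)
                    .build())
                .build();
            admin.createTable(desc);
        }
    }
}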
2024-12-09T01:51:25,448 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0., hostname=ef6f18c58dc9,35707,1733709082350, seqNum=2] 2024-12-09T01:51:25,449 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:51:25,452 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52930, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:51:25,460 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-09T01:51:25,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-09T01:51:25,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T01:51:25,467 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-09T01:51:25,469 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T01:51:25,470 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T01:51:25,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T01:51:25,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35707 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T01:51:25,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0. 
2024-12-09T01:51:25,635 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 9668fa925f1116a6282af470b96a7ff0 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-09T01:51:25,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/default/TestHBaseWalOnEC/9668fa925f1116a6282af470b96a7ff0/.tmp/cf/1700318a6f4849ec8917a4bf07f04e5f is 36, key is row/cf:cq/1733709085452/Put/seqid=0 2024-12-09T01:51:25,702 WARN [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:25,702 WARN [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:25,707 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_225273579_22 at /127.0.0.1:34418 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:46601:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34418 dst: /127.0.0.1:46601 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:25,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-09T01:51:25,713 WARN [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T01:51:25,714 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/default/TestHBaseWalOnEC/9668fa925f1116a6282af470b96a7ff0/.tmp/cf/1700318a6f4849ec8917a4bf07f04e5f 2024-12-09T01:51:25,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/default/TestHBaseWalOnEC/9668fa925f1116a6282af470b96a7ff0/.tmp/cf/1700318a6f4849ec8917a4bf07f04e5f as hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/default/TestHBaseWalOnEC/9668fa925f1116a6282af470b96a7ff0/cf/1700318a6f4849ec8917a4bf07f04e5f 2024-12-09T01:51:25,772 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/default/TestHBaseWalOnEC/9668fa925f1116a6282af470b96a7ff0/cf/1700318a6f4849ec8917a4bf07f04e5f, entries=1, sequenceid=5, filesize=4.7 K 2024-12-09T01:51:25,779 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 9668fa925f1116a6282af470b96a7ff0 in 143ms, sequenceid=5, compaction requested=false 2024-12-09T01:51:25,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-09T01:51:25,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T01:51:25,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 9668fa925f1116a6282af470b96a7ff0: 2024-12-09T01:51:25,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0. 
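The flush that just completed wrote a single ~32 B cell whose key is logged as row/cf:cq, and it was requested by the client ("Client=jenkins ... flush TestHBaseWalOnEC") before being carried out by FlushTableProcedure/FlushRegionProcedure. A minimal client-side sketch of that sequence follows, assuming a standard connection; only the table, row and column names are taken from the log, the value and surrounding setup are illustrative.

// Sketch: the client-side equivalent of what the log records — one Put into
// row 'row', column 'cf:cq', then a table flush issued through the Admin API.
// Row/column/table names are taken from the log; everything else is illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlush {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
            // The flushed cell logged above has key row/cf:cq; the value here is a placeholder.
            table.put(new Put(Bytes.toBytes("row"))
                .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
            // Triggers the FlushTableProcedure / FlushRegionProcedure pair seen in the log.
            admin.flush(name);
        }
    }
}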
2024-12-09T01:51:25,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T01:51:25,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T01:51:25,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T01:51:25,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 318 msec 2024-12-09T01:51:25,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 333 msec 2024-12-09T01:51:26,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34719 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T01:51:26,093 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T01:51:26,109 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T01:51:26,110 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T01:51:26,110 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:26,115 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:26,116 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T01:51:26,116 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:26,117 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T01:51:26,117 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=63992445, stopped=false 2024-12-09T01:51:26,118 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef6f18c58dc9,34719,1733709081654 2024-12-09T01:51:26,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:26,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:26,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:26,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:26,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:26,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, 
quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:26,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:26,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:26,121 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:26,121 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:51:26,122 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:26,122 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:26,122 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T01:51:26,122 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at 
org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:26,122 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:26,122 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:26,123 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(878): Closing user regions 2024-12-09T01:51:26,123 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,35707,1733709082350' ***** 2024-12-09T01:51:26,123 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(3091): Received CLOSE for 9668fa925f1116a6282af470b96a7ff0 2024-12-09T01:51:26,123 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:51:26,123 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,37065,1733709082461' ***** 2024-12-09T01:51:26,123 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:51:26,123 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,41167,1733709082511' ***** 2024-12-09T01:51:26,123 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:51:26,123 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:51:26,123 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:51:26,124 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:51:26,124 INFO 
[MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:51:26,124 INFO [RS:2;ef6f18c58dc9:41167 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:51:26,124 INFO [RS:1;ef6f18c58dc9:37065 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:51:26,124 INFO [RS:2;ef6f18c58dc9:41167 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:51:26,124 INFO [RS:1;ef6f18c58dc9:37065 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:51:26,124 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,41167,1733709082511 2024-12-09T01:51:26,124 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,37065,1733709082461 2024-12-09T01:51:26,124 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:51:26,124 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:51:26,124 INFO [RS:1;ef6f18c58dc9:37065 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;ef6f18c58dc9:37065. 2024-12-09T01:51:26,124 INFO [RS:2;ef6f18c58dc9:41167 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;ef6f18c58dc9:41167. 2024-12-09T01:51:26,124 DEBUG [RS:1;ef6f18c58dc9:37065 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:26,125 DEBUG [RS:1;ef6f18c58dc9:37065 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:26,125 DEBUG [RS:2;ef6f18c58dc9:41167 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at 
org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:26,125 DEBUG [RS:2;ef6f18c58dc9:41167 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:26,125 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,37065,1733709082461; all regions closed. 2024-12-09T01:51:26,125 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9668fa925f1116a6282af470b96a7ff0, disabling compactions & flushes 2024-12-09T01:51:26,125 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T01:51:26,125 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T01:51:26,125 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0. 2024-12-09T01:51:26,125 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T01:51:26,125 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0. 2024-12-09T01:51:26,125 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0. after waiting 0 ms 2024-12-09T01:51:26,125 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T01:51:26,125 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0. 
2024-12-09T01:51:26,126 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T01:51:26,126 DEBUG [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T01:51:26,126 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:51:26,126 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:51:26,126 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:51:26,126 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:51:26,126 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:51:26,126 DEBUG [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T01:51:26,126 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:51:26,126 INFO [RS:0;ef6f18c58dc9:35707 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:51:26,126 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:51:26,126 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-09T01:51:26,126 INFO [RS:0;ef6f18c58dc9:35707 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:51:26,126 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,35707,1733709082350 2024-12-09T01:51:26,126 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:51:26,127 INFO [RS:0;ef6f18c58dc9:35707 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef6f18c58dc9:35707. 
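The shutdown underway here is driven from the test's tearDown, which the earlier call stacks show entering HBaseTestingUtil.shutdownMiniCluster (via TestHBaseWalOnEC.tearDown at line 101). A rough sketch of that teardown shape follows; the class and method names are the ones in the stack traces, while the utility field, its initialization and the per-test @After placement are assumptions.

// Sketch of the teardown step visible in the call stacks above: the test's
// tearDown delegates to HBaseTestingUtil#shutdownMiniCluster, which closes the
// shared connection, requests master shutdown (the "Cluster shutdown requested
// of master=..." line above) and stops the mini DFS/ZooKeeper processes.
// How 'util' is created and started is not shown in this log and is assumed here.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class TearDownSketch {
    private final HBaseTestingUtil util = new HBaseTestingUtil();

    @After
    public void tearDown() throws Exception {
        util.shutdownMiniCluster();
    }
}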
2024-12-09T01:51:26,127 DEBUG [RS:0;ef6f18c58dc9:35707 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:26,127 DEBUG [RS:0;ef6f18c58dc9:35707 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:26,127 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T01:51:26,127 DEBUG [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(1325): Online Regions={9668fa925f1116a6282af470b96a7ff0=TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0.} 2024-12-09T01:51:26,127 DEBUG [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(1351): Waiting on 9668fa925f1116a6282af470b96a7ff0 2024-12-09T01:51:26,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40555 is added to blk_1073741827_1017 (size=93) 2024-12-09T01:51:26,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_1073741827_1017 (size=93) 2024-12-09T01:51:26,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_1073741827_1017 (size=93) 2024-12-09T01:51:26,151 DEBUG [RS:1;ef6f18c58dc9:37065 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/oldWALs 2024-12-09T01:51:26,151 INFO [RS:1;ef6f18c58dc9:37065 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef6f18c58dc9%2C37065%2C1733709082461:(num 1733709083875) 2024-12-09T01:51:26,151 DEBUG [RS:1;ef6f18c58dc9:37065 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:26,151 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:26,151 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:51:26,152 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, 
unit=MILLISECONDS] on shutdown 2024-12-09T01:51:26,152 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T01:51:26,152 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T01:51:26,152 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T01:51:26,152 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T01:51:26,152 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:51:26,153 INFO [RS:1;ef6f18c58dc9:37065 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37065 2024-12-09T01:51:26,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-09T01:51:26,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,37065,1733709082461 2024-12-09T01:51:26,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:51:26,157 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:51:26,159 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,37065,1733709082461] 2024-12-09T01:51:26,161 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,37065,1733709082461 already deleted, retry=false 2024-12-09T01:51:26,162 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,37065,1733709082461 expired; onlineServers=2 2024-12-09T01:51:26,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-09T01:51:26,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-09T01:51:26,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40555 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-09T01:51:26,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-09T01:51:26,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40555 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-09T01:51:26,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40555 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-09T01:51:26,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-09T01:51:26,173 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] 
wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/default/TestHBaseWalOnEC/9668fa925f1116a6282af470b96a7ff0/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T01:51:26,176 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0. 2024-12-09T01:51:26,176 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9668fa925f1116a6282af470b96a7ff0: Waiting for close lock at 1733709086124Running coprocessor pre-close hooks at 1733709086125 (+1 ms)Disabling compacts and flushes for region at 1733709086125Disabling writes for close at 1733709086125Writing region close event to WAL at 1733709086137 (+12 ms)Running coprocessor post-close hooks at 1733709086174 (+37 ms)Closed at 1733709086176 (+2 ms) 2024-12-09T01:51:26,177 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0. 2024-12-09T01:51:26,177 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/.tmp/info/97a6ab728fa8423db5530398ccb45a6f is 153, key is TestHBaseWalOnEC,,1733709084776.9668fa925f1116a6282af470b96a7ff0./info:regioninfo/1733709085238/Put/seqid=0 2024-12-09T01:51:26,181 WARN [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:26,181 WARN [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:26,185 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-547948635_22 at /127.0.0.1:59362 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:35415:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59362 dst: /127.0.0.1:35415 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:26,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-09T01:51:26,194 WARN [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T01:51:26,194 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/.tmp/info/97a6ab728fa8423db5530398ccb45a6f 2024-12-09T01:51:26,202 INFO [regionserver/ef6f18c58dc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:26,208 INFO [regionserver/ef6f18c58dc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:26,210 INFO [regionserver/ef6f18c58dc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:26,241 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/.tmp/ns/84e3c9fc4a5f43dc8af9c936431a2621 is 43, key is default/ns:d/1733709084522/Put/seqid=0 2024-12-09T01:51:26,243 WARN [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:26,243 WARN [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:26,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-547948635_22 at /127.0.0.1:34476 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:46601:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34476 dst: /127.0.0.1:46601 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:26,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-09T01:51:26,252 WARN [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T01:51:26,253 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/.tmp/ns/84e3c9fc4a5f43dc8af9c936431a2621 2024-12-09T01:51:26,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:26,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x1007478e1e60002, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:26,261 INFO [RS:1;ef6f18c58dc9:37065 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:51:26,261 INFO [RS:1;ef6f18c58dc9:37065 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,37065,1733709082461; zookeeper connection closed. 2024-12-09T01:51:26,270 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@239040e2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@239040e2 2024-12-09T01:51:26,283 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/.tmp/table/8956b6ec12b940898a86afd9de12d9bd is 52, key is TestHBaseWalOnEC/table:state/1733709085253/Put/seqid=0 2024-12-09T01:51:26,286 WARN [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T01:51:26,286 WARN [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:26,292 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-547948635_22 at /127.0.0.1:59382 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:35415:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59382 dst: /127.0.0.1:35415 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:26,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-09T01:51:26,296 WARN [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T01:51:26,297 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/.tmp/table/8956b6ec12b940898a86afd9de12d9bd 2024-12-09T01:51:26,308 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/.tmp/info/97a6ab728fa8423db5530398ccb45a6f as hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/info/97a6ab728fa8423db5530398ccb45a6f 2024-12-09T01:51:26,319 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/info/97a6ab728fa8423db5530398ccb45a6f, entries=10, sequenceid=11, filesize=6.5 K 2024-12-09T01:51:26,320 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/.tmp/ns/84e3c9fc4a5f43dc8af9c936431a2621 as hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/ns/84e3c9fc4a5f43dc8af9c936431a2621 2024-12-09T01:51:26,326 DEBUG [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T01:51:26,327 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,35707,1733709082350; all regions closed. 
2024-12-09T01:51:26,330 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/ns/84e3c9fc4a5f43dc8af9c936431a2621, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T01:51:26,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40555 is added to blk_1073741828_1018 (size=1298) 2024-12-09T01:51:26,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_1073741828_1018 (size=1298) 2024-12-09T01:51:26,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_1073741828_1018 (size=1298) 2024-12-09T01:51:26,332 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/.tmp/table/8956b6ec12b940898a86afd9de12d9bd as hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/table/8956b6ec12b940898a86afd9de12d9bd 2024-12-09T01:51:26,334 DEBUG [RS:0;ef6f18c58dc9:35707 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/oldWALs 2024-12-09T01:51:26,334 INFO [RS:0;ef6f18c58dc9:35707 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef6f18c58dc9%2C35707%2C1733709082350:(num 1733709083875) 2024-12-09T01:51:26,334 DEBUG [RS:0;ef6f18c58dc9:35707 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:26,334 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:26,334 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:51:26,335 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T01:51:26,335 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T01:51:26,335 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T01:51:26,335 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T01:51:26,335 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:51:26,335 INFO [RS:0;ef6f18c58dc9:35707 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35707 2024-12-09T01:51:26,335 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T01:51:26,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,35707,1733709082350 2024-12-09T01:51:26,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:51:26,338 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:51:26,339 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,35707,1733709082350] 2024-12-09T01:51:26,341 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,35707,1733709082350 already deleted, retry=false 2024-12-09T01:51:26,341 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,35707,1733709082350 expired; onlineServers=1 2024-12-09T01:51:26,344 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/table/8956b6ec12b940898a86afd9de12d9bd, entries=2, sequenceid=11, filesize=5.1 K 2024-12-09T01:51:26,345 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 219ms, sequenceid=11, compaction requested=false 2024-12-09T01:51:26,345 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T01:51:26,355 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T01:51:26,356 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:51:26,356 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:51:26,356 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709086126Running coprocessor pre-close hooks at 1733709086126Disabling compacts and flushes for region at 1733709086126Disabling writes for close at 1733709086126Obtaining lock to block concurrent updates at 1733709086126Preparing flush snapshotting stores in 1588230740 at 1733709086126Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733709086127 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733709086128 (+1 ms)Flushing 1588230740/info: creating writer at 1733709086128Flushing 1588230740/info: appending metadata at 1733709086172 (+44 
ms)Flushing 1588230740/info: closing flushed file at 1733709086172Flushing 1588230740/ns: creating writer at 1733709086210 (+38 ms)Flushing 1588230740/ns: appending metadata at 1733709086238 (+28 ms)Flushing 1588230740/ns: closing flushed file at 1733709086238Flushing 1588230740/table: creating writer at 1733709086265 (+27 ms)Flushing 1588230740/table: appending metadata at 1733709086283 (+18 ms)Flushing 1588230740/table: closing flushed file at 1733709086283Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10e2cc03: reopening flushed file at 1733709086307 (+24 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9808199: reopening flushed file at 1733709086319 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62006ef9: reopening flushed file at 1733709086331 (+12 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 219ms, sequenceid=11, compaction requested=false at 1733709086345 (+14 ms)Writing region close event to WAL at 1733709086348 (+3 ms)Running coprocessor post-close hooks at 1733709086356 (+8 ms)Closed at 1733709086356 2024-12-09T01:51:26,356 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T01:51:26,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:26,440 INFO [RS:0;ef6f18c58dc9:35707 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:51:26,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35707-0x1007478e1e60001, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:26,440 INFO [RS:0;ef6f18c58dc9:35707 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,35707,1733709082350; zookeeper connection closed. 2024-12-09T01:51:26,440 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@43ce0dde {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@43ce0dde 2024-12-09T01:51:26,527 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,41167,1733709082511; all regions closed. 
2024-12-09T01:51:26,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_1073741829_1019 (size=2751) 2024-12-09T01:51:26,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40555 is added to blk_1073741829_1019 (size=2751) 2024-12-09T01:51:26,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_1073741829_1019 (size=2751) 2024-12-09T01:51:26,534 DEBUG [RS:2;ef6f18c58dc9:41167 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/oldWALs 2024-12-09T01:51:26,534 INFO [RS:2;ef6f18c58dc9:41167 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef6f18c58dc9%2C41167%2C1733709082511.meta:.meta(num 1733709084360) 2024-12-09T01:51:26,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_1073741826_1016 (size=93) 2024-12-09T01:51:26,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40555 is added to blk_1073741826_1016 (size=93) 2024-12-09T01:51:26,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_1073741826_1016 (size=93) 2024-12-09T01:51:26,541 DEBUG [RS:2;ef6f18c58dc9:41167 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/oldWALs 2024-12-09T01:51:26,541 INFO [RS:2;ef6f18c58dc9:41167 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef6f18c58dc9%2C41167%2C1733709082511:(num 1733709083875) 2024-12-09T01:51:26,541 DEBUG [RS:2;ef6f18c58dc9:41167 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:26,541 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:26,541 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:51:26,541 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T01:51:26,541 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:51:26,541 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T01:51:26,541 INFO [RS:2;ef6f18c58dc9:41167 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41167 2024-12-09T01:51:26,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,41167,1733709082511 2024-12-09T01:51:26,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:51:26,544 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:51:26,546 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,41167,1733709082511] 2024-12-09T01:51:26,547 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,41167,1733709082511 already deleted, retry=false 2024-12-09T01:51:26,547 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,41167,1733709082511 expired; onlineServers=0 2024-12-09T01:51:26,547 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef6f18c58dc9,34719,1733709081654' ***** 2024-12-09T01:51:26,547 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T01:51:26,547 INFO [M:0;ef6f18c58dc9:34719 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:51:26,548 INFO [M:0;ef6f18c58dc9:34719 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:51:26,548 DEBUG [M:0;ef6f18c58dc9:34719 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T01:51:26,548 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T01:51:26,548 DEBUG [M:0;ef6f18c58dc9:34719 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T01:51:26,548 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709083547 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709083547,5,FailOnTimeoutGroup] 2024-12-09T01:51:26,548 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709083545 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709083545,5,FailOnTimeoutGroup] 2024-12-09T01:51:26,548 INFO [M:0;ef6f18c58dc9:34719 {}] hbase.ChoreService(370): Chore service for: master/ef6f18c58dc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T01:51:26,548 INFO [M:0;ef6f18c58dc9:34719 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:51:26,548 DEBUG [M:0;ef6f18c58dc9:34719 {}] master.HMaster(1795): Stopping service threads 2024-12-09T01:51:26,549 INFO [M:0;ef6f18c58dc9:34719 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T01:51:26,549 INFO [M:0;ef6f18c58dc9:34719 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:51:26,549 INFO [M:0;ef6f18c58dc9:34719 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T01:51:26,550 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T01:51:26,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T01:51:26,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:26,550 DEBUG [M:0;ef6f18c58dc9:34719 {}] zookeeper.ZKUtil(347): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T01:51:26,550 WARN [M:0;ef6f18c58dc9:34719 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T01:51:26,551 INFO [M:0;ef6f18c58dc9:34719 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/.lastflushedseqids 2024-12-09T01:51:26,561 WARN [M:0;ef6f18c58dc9:34719 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:26,561 WARN [M:0;ef6f18c58dc9:34719 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T01:51:26,563 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2078043917_22 at /127.0.0.1:59400 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:35415:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59400 dst: /127.0.0.1:35415 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:26,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-09T01:51:26,568 WARN [M:0;ef6f18c58dc9:34719 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T01:51:26,568 INFO [M:0;ef6f18c58dc9:34719 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T01:51:26,568 INFO [M:0;ef6f18c58dc9:34719 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T01:51:26,568 DEBUG [M:0;ef6f18c58dc9:34719 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:51:26,568 INFO [M:0;ef6f18c58dc9:34719 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:26,568 DEBUG [M:0;ef6f18c58dc9:34719 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:26,568 DEBUG [M:0;ef6f18c58dc9:34719 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:51:26,568 DEBUG [M:0;ef6f18c58dc9:34719 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T01:51:26,569 INFO [M:0;ef6f18c58dc9:34719 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.85 KB heapSize=34.13 KB 2024-12-09T01:51:26,588 DEBUG [M:0;ef6f18c58dc9:34719 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/76c44e63d58e4a408b143955c560ba29 is 82, key is hbase:meta,,1/info:regioninfo/1733709084448/Put/seqid=0 2024-12-09T01:51:26,590 WARN [M:0;ef6f18c58dc9:34719 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:26,590 WARN [M:0;ef6f18c58dc9:34719 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:26,593 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2078043917_22 at /127.0.0.1:34494 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:46601:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34494 dst: /127.0.0.1:46601 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:26,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-09T01:51:26,598 WARN [M:0;ef6f18c58dc9:34719 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T01:51:26,598 INFO [M:0;ef6f18c58dc9:34719 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/76c44e63d58e4a408b143955c560ba29 2024-12-09T01:51:26,623 DEBUG [M:0;ef6f18c58dc9:34719 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ff3c2d48cbfe4ebe84a29697db35f624 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733709085261/Put/seqid=0 2024-12-09T01:51:26,625 WARN [M:0;ef6f18c58dc9:34719 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:26,625 WARN [M:0;ef6f18c58dc9:34719 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:26,629 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2078043917_22 at /127.0.0.1:59406 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:35415:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59406 dst: /127.0.0.1:35415 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:26,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_-9223372036854775552_1037 (size=6441) 2024-12-09T01:51:26,634 WARN [M:0;ef6f18c58dc9:34719 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T01:51:26,634 INFO [M:0;ef6f18c58dc9:34719 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.17 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ff3c2d48cbfe4ebe84a29697db35f624 2024-12-09T01:51:26,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:26,646 INFO [RS:2;ef6f18c58dc9:41167 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:51:26,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41167-0x1007478e1e60003, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:26,646 INFO [RS:2;ef6f18c58dc9:41167 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,41167,1733709082511; zookeeper connection closed. 2024-12-09T01:51:26,646 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6ffdad1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6ffdad1 2024-12-09T01:51:26,647 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T01:51:26,660 DEBUG [M:0;ef6f18c58dc9:34719 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9164f79dd53541f1b7bbb3cb545721c0 is 69, key is ef6f18c58dc9,35707,1733709082350/rs:state/1733709083582/Put/seqid=0 2024-12-09T01:51:26,662 WARN [M:0;ef6f18c58dc9:34719 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:26,662 WARN [M:0;ef6f18c58dc9:34719 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T01:51:26,665 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2078043917_22 at /127.0.0.1:38912 [Receiving block BP-559585578-172.17.0.2-1733709078255:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:40555:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38912 dst: /127.0.0.1:40555 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:26,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40555 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-09T01:51:26,669 WARN [M:0;ef6f18c58dc9:34719 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T01:51:26,669 INFO [M:0;ef6f18c58dc9:34719 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9164f79dd53541f1b7bbb3cb545721c0 2024-12-09T01:51:26,680 DEBUG [M:0;ef6f18c58dc9:34719 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/76c44e63d58e4a408b143955c560ba29 as hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/76c44e63d58e4a408b143955c560ba29 2024-12-09T01:51:26,690 INFO [M:0;ef6f18c58dc9:34719 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/76c44e63d58e4a408b143955c560ba29, entries=8, sequenceid=72, filesize=5.5 K 2024-12-09T01:51:26,692 DEBUG [M:0;ef6f18c58dc9:34719 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ff3c2d48cbfe4ebe84a29697db35f624 as hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ff3c2d48cbfe4ebe84a29697db35f624 2024-12-09T01:51:26,701 INFO [M:0;ef6f18c58dc9:34719 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ff3c2d48cbfe4ebe84a29697db35f624, entries=8, sequenceid=72, filesize=6.3 K 2024-12-09T01:51:26,703 DEBUG [M:0;ef6f18c58dc9:34719 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9164f79dd53541f1b7bbb3cb545721c0 as hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9164f79dd53541f1b7bbb3cb545721c0 2024-12-09T01:51:26,711 INFO [M:0;ef6f18c58dc9:34719 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9164f79dd53541f1b7bbb3cb545721c0, entries=3, sequenceid=72, filesize=5.2 K 2024-12-09T01:51:26,713 INFO [M:0;ef6f18c58dc9:34719 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=72, compaction requested=false 2024-12-09T01:51:26,715 INFO [M:0;ef6f18c58dc9:34719 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:26,715 DEBUG [M:0;ef6f18c58dc9:34719 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709086568Disabling compacts and flushes for region at 1733709086568Disabling writes for close at 1733709086568Obtaining lock to block concurrent updates at 1733709086569 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733709086569Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27492, getHeapSize=34888, getOffHeapSize=0, getCellsCount=85 at 1733709086569Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733709086570 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733709086570Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733709086588 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733709086588Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733709086606 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733709086623 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733709086623Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733709086642 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733709086659 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733709086659Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c3bbc0b: reopening flushed file at 1733709086678 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c615d19: reopening flushed file at 1733709086690 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@180fd209: reopening flushed file at 1733709086701 (+11 ms)Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=72, compaction requested=false at 1733709086713 (+12 ms)Writing region close event to WAL at 1733709086715 (+2 ms)Closed at 1733709086715 2024-12-09T01:51:26,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35415 is added to blk_1073741825_1011 (size=32695) 2024-12-09T01:51:26,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46601 is added to blk_1073741825_1011 (size=32695) 2024-12-09T01:51:26,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40555 is added to blk_1073741825_1011 (size=32695) 2024-12-09T01:51:26,720 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T01:51:26,720 INFO [M:0;ef6f18c58dc9:34719 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T01:51:26,720 INFO [M:0;ef6f18c58dc9:34719 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34719 2024-12-09T01:51:26,721 INFO [M:0;ef6f18c58dc9:34719 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:51:26,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:26,824 INFO [M:0;ef6f18c58dc9:34719 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:51:26,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34719-0x1007478e1e60000, quorum=127.0.0.1:49937, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:26,829 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@653e6301{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:26,831 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@404caff2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:51:26,831 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:51:26,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@343b36c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:51:26,832 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35e2f174{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/hadoop.log.dir/,STOPPED} 2024-12-09T01:51:26,834 WARN [BP-559585578-172.17.0.2-1733709078255 heartbeating to localhost/127.0.0.1:41875 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:51:26,834 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:51:26,834 WARN [BP-559585578-172.17.0.2-1733709078255 heartbeating to localhost/127.0.0.1:41875 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-559585578-172.17.0.2-1733709078255 (Datanode Uuid 52791522-8ac7-4c11-95a1-5eccf43a6261) service to localhost/127.0.0.1:41875 2024-12-09T01:51:26,834 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:51:26,836 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/data/data5/current/BP-559585578-172.17.0.2-1733709078255 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:26,836 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/data/data6/current/BP-559585578-172.17.0.2-1733709078255 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:26,837 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:51:26,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@513cab2c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:26,839 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29a123ec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:51:26,839 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:51:26,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6af5a446{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:51:26,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@444b27d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/hadoop.log.dir/,STOPPED} 2024-12-09T01:51:26,841 WARN [BP-559585578-172.17.0.2-1733709078255 heartbeating to localhost/127.0.0.1:41875 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:51:26,841 WARN [BP-559585578-172.17.0.2-1733709078255 heartbeating to localhost/127.0.0.1:41875 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-559585578-172.17.0.2-1733709078255 (Datanode Uuid af8a4047-b43f-4e4a-a3ed-218f5a14e69e) service to localhost/127.0.0.1:41875 2024-12-09T01:51:26,841 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:51:26,841 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:51:26,842 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/data/data3/current/BP-559585578-172.17.0.2-1733709078255 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:26,842 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/data/data4/current/BP-559585578-172.17.0.2-1733709078255 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:26,842 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:51:26,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65462677{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:26,849 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@383014b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:51:26,849 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:51:26,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dc262e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:51:26,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@431e53b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/hadoop.log.dir/,STOPPED} 2024-12-09T01:51:26,851 WARN [BP-559585578-172.17.0.2-1733709078255 heartbeating to localhost/127.0.0.1:41875 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:51:26,851 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:51:26,851 WARN [BP-559585578-172.17.0.2-1733709078255 heartbeating to localhost/127.0.0.1:41875 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-559585578-172.17.0.2-1733709078255 (Datanode Uuid eeced6e1-2a67-42be-81fd-fc091dd60af3) service to localhost/127.0.0.1:41875 2024-12-09T01:51:26,851 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:51:26,851 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/data/data1/current/BP-559585578-172.17.0.2-1733709078255 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:26,851 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/cluster_a72df13a-320a-afec-fc01-7ad321829863/data/data2/current/BP-559585578-172.17.0.2-1733709078255 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:26,852 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:51:26,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58dbf239{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:51:26,860 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13e2962d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:51:26,860 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:51:26,861 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f93dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:51:26,861 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4395d44b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/hadoop.log.dir/,STOPPED} 2024-12-09T01:51:26,869 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T01:51:26,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T01:51:26,907 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=88 (was 158), OpenFileDescriptor=441 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=259 (was 255) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=11762 (was 12031) 2024-12-09T01:51:26,914 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=88, OpenFileDescriptor=441, MaxFileDescriptor=1048576, SystemLoadAverage=259, ProcessCount=11, AvailableMemoryMB=11762 2024-12-09T01:51:26,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T01:51:26,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/hadoop.log.dir so I do NOT create it in target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98 2024-12-09T01:51:26,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5e934fc9-80e3-c528-8311-02add2e6ba80/hadoop.tmp.dir so I do NOT create it in target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98 2024-12-09T01:51:26,915 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b, deleteOnExit=true 2024-12-09T01:51:26,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T01:51:26,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/test.cache.data in system properties and HBase conf 2024-12-09T01:51:26,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T01:51:26,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/hadoop.log.dir in system properties and HBase conf 2024-12-09T01:51:26,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T01:51:26,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T01:51:26,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T01:51:26,916 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T01:51:26,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:51:26,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:51:26,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T01:51:26,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:51:26,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T01:51:26,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T01:51:26,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:51:26,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:51:26,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T01:51:26,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/nfs.dump.dir in system properties and HBase conf 2024-12-09T01:51:26,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/java.io.tmpdir in system properties and HBase conf 2024-12-09T01:51:26,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:51:26,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T01:51:26,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T01:51:27,006 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:27,011 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:27,013 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:27,013 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:27,013 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:51:27,013 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:27,014 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39a1ffce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:27,015 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58cabbb9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:27,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5265f235{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/java.io.tmpdir/jetty-localhost-46565-hadoop-hdfs-3_4_1-tests_jar-_-any-16250626558724987016/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:51:27,131 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@206988bd{HTTP/1.1, (http/1.1)}{localhost:46565} 2024-12-09T01:51:27,132 INFO [Time-limited test {}] server.Server(415): Started @10878ms 2024-12-09T01:51:27,217 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:27,221 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:27,221 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:27,221 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:27,221 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:51:27,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5657ac77{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:27,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dae6551{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:27,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@71005823{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/java.io.tmpdir/jetty-localhost-41459-hadoop-hdfs-3_4_1-tests_jar-_-any-709462742068094712/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:27,339 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@10d3673d{HTTP/1.1, (http/1.1)}{localhost:41459} 2024-12-09T01:51:27,339 INFO [Time-limited test {}] server.Server(415): Started @11086ms 2024-12-09T01:51:27,341 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:51:27,379 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:27,383 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:27,384 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:27,384 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:27,384 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:51:27,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aeebd66{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:27,392 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c077530{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:27,452 WARN [Thread-523 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/data/data1/current/BP-403251070-172.17.0.2-1733709086950/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:27,452 WARN [Thread-524 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/data/data2/current/BP-403251070-172.17.0.2-1733709086950/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:27,473 WARN [Thread-502 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:51:27,476 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd4dec939dea1dac with lease ID 0x52220a60db7ebc0c: Processing first storage report for DS-1c5c2151-de41-43c0-bec4-8ac66f6644d6 from datanode DatanodeRegistration(127.0.0.1:42527, datanodeUuid=f188dbd1-73f4-4e6d-a520-8eac83fceca8, infoPort=42581, infoSecurePort=0, ipcPort=33531, storageInfo=lv=-57;cid=testClusterID;nsid=66592705;c=1733709086950) 2024-12-09T01:51:27,476 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd4dec939dea1dac with lease ID 0x52220a60db7ebc0c: from storage DS-1c5c2151-de41-43c0-bec4-8ac66f6644d6 node DatanodeRegistration(127.0.0.1:42527, datanodeUuid=f188dbd1-73f4-4e6d-a520-8eac83fceca8, infoPort=42581, infoSecurePort=0, ipcPort=33531, storageInfo=lv=-57;cid=testClusterID;nsid=66592705;c=1733709086950), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:27,476 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd4dec939dea1dac with lease ID 0x52220a60db7ebc0c: Processing first storage report for DS-5d75572a-de86-4ec7-8c34-78875d57151c from datanode DatanodeRegistration(127.0.0.1:42527, datanodeUuid=f188dbd1-73f4-4e6d-a520-8eac83fceca8, infoPort=42581, infoSecurePort=0, ipcPort=33531, storageInfo=lv=-57;cid=testClusterID;nsid=66592705;c=1733709086950) 2024-12-09T01:51:27,476 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd4dec939dea1dac with lease ID 0x52220a60db7ebc0c: from storage DS-5d75572a-de86-4ec7-8c34-78875d57151c node DatanodeRegistration(127.0.0.1:42527, datanodeUuid=f188dbd1-73f4-4e6d-a520-8eac83fceca8, infoPort=42581, infoSecurePort=0, ipcPort=33531, storageInfo=lv=-57;cid=testClusterID;nsid=66592705;c=1733709086950), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:27,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f06ea44{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/java.io.tmpdir/jetty-localhost-46871-hadoop-hdfs-3_4_1-tests_jar-_-any-15157767683312232621/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:27,509 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b030921{HTTP/1.1, (http/1.1)}{localhost:46871} 2024-12-09T01:51:27,509 INFO [Time-limited test {}] server.Server(415): Started @11256ms 2024-12-09T01:51:27,511 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:51:27,541 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:27,544 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:27,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:27,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:27,545 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:51:27,548 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51a41d0b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:27,548 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b15d8c9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:27,607 WARN [Thread-559 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/data/data4/current/BP-403251070-172.17.0.2-1733709086950/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:27,607 WARN [Thread-558 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/data/data3/current/BP-403251070-172.17.0.2-1733709086950/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:27,626 WARN [Thread-538 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:51:27,630 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc6380cbbbb91ffb with lease ID 0x52220a60db7ebc0d: Processing first storage report for DS-780a734c-98b8-47f5-8b11-bca9ed884ab5 from datanode DatanodeRegistration(127.0.0.1:40449, datanodeUuid=3d30c5e5-d547-4dcb-af4c-48945ec8e8f6, infoPort=32807, infoSecurePort=0, ipcPort=45631, storageInfo=lv=-57;cid=testClusterID;nsid=66592705;c=1733709086950) 2024-12-09T01:51:27,630 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc6380cbbbb91ffb with lease ID 0x52220a60db7ebc0d: from storage DS-780a734c-98b8-47f5-8b11-bca9ed884ab5 node DatanodeRegistration(127.0.0.1:40449, datanodeUuid=3d30c5e5-d547-4dcb-af4c-48945ec8e8f6, infoPort=32807, infoSecurePort=0, ipcPort=45631, storageInfo=lv=-57;cid=testClusterID;nsid=66592705;c=1733709086950), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T01:51:27,630 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc6380cbbbb91ffb with lease ID 0x52220a60db7ebc0d: Processing first storage report for DS-34381389-19f2-4526-83a1-21dac151b6cd from datanode DatanodeRegistration(127.0.0.1:40449, datanodeUuid=3d30c5e5-d547-4dcb-af4c-48945ec8e8f6, infoPort=32807, infoSecurePort=0, ipcPort=45631, storageInfo=lv=-57;cid=testClusterID;nsid=66592705;c=1733709086950) 2024-12-09T01:51:27,630 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc6380cbbbb91ffb with lease ID 0x52220a60db7ebc0d: from storage DS-34381389-19f2-4526-83a1-21dac151b6cd node DatanodeRegistration(127.0.0.1:40449, datanodeUuid=3d30c5e5-d547-4dcb-af4c-48945ec8e8f6, infoPort=32807, infoSecurePort=0, ipcPort=45631, storageInfo=lv=-57;cid=testClusterID;nsid=66592705;c=1733709086950), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:27,682 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ef8f7bc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/java.io.tmpdir/jetty-localhost-35579-hadoop-hdfs-3_4_1-tests_jar-_-any-10282647790875914515/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:27,683 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@12529ca{HTTP/1.1, (http/1.1)}{localhost:35579} 2024-12-09T01:51:27,683 INFO [Time-limited test {}] server.Server(415): Started @11429ms 2024-12-09T01:51:27,685 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
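The restart sequence above replays the minicluster bring-up requested at 01:51:26,915 with StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1}: DFS first, then ZooKeeper, then HBase. As a rough sketch of the test-side call that produces output like this, using the HBaseTestingUtil and StartMiniClusterOption classes named in the log (builder and method names are assumed from that public testing API, not copied from TestHBaseWalOnEC itself):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirror the logged option: 1 master, 3 region servers, 3 datanodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);     // brings up DFS, MiniZooKeeperCluster and HBase
    try {
      // ... exercise the cluster, e.g. via util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();      // emits the "Minicluster is down" line seen earlier
    }
  }
}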
2024-12-09T01:51:27,802 WARN [Thread-585 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/data/data6/current/BP-403251070-172.17.0.2-1733709086950/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:27,802 WARN [Thread-584 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/data/data5/current/BP-403251070-172.17.0.2-1733709086950/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:27,828 WARN [Thread-573 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T01:51:27,832 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7b536315bd6eba63 with lease ID 0x52220a60db7ebc0e: Processing first storage report for DS-94fda5fa-6a4a-4486-9bd6-e2a3f82dcdef from datanode DatanodeRegistration(127.0.0.1:37693, datanodeUuid=274dce51-4584-441b-a07d-f3c78c826bcc, infoPort=33653, infoSecurePort=0, ipcPort=35067, storageInfo=lv=-57;cid=testClusterID;nsid=66592705;c=1733709086950) 2024-12-09T01:51:27,832 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7b536315bd6eba63 with lease ID 0x52220a60db7ebc0e: from storage DS-94fda5fa-6a4a-4486-9bd6-e2a3f82dcdef node DatanodeRegistration(127.0.0.1:37693, datanodeUuid=274dce51-4584-441b-a07d-f3c78c826bcc, infoPort=33653, infoSecurePort=0, ipcPort=35067, storageInfo=lv=-57;cid=testClusterID;nsid=66592705;c=1733709086950), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:27,832 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7b536315bd6eba63 with lease ID 0x52220a60db7ebc0e: Processing first storage report for DS-64bba25f-db99-4979-8d51-c552af501db5 from datanode DatanodeRegistration(127.0.0.1:37693, datanodeUuid=274dce51-4584-441b-a07d-f3c78c826bcc, infoPort=33653, infoSecurePort=0, ipcPort=35067, storageInfo=lv=-57;cid=testClusterID;nsid=66592705;c=1733709086950) 2024-12-09T01:51:27,832 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7b536315bd6eba63 with lease ID 0x52220a60db7ebc0e: from storage DS-64bba25f-db99-4979-8d51-c552af501db5 node DatanodeRegistration(127.0.0.1:37693, datanodeUuid=274dce51-4584-441b-a07d-f3c78c826bcc, infoPort=33653, infoSecurePort=0, ipcPort=35067, storageInfo=lv=-57;cid=testClusterID;nsid=66592705;c=1733709086950), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:27,924 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98 2024-12-09T01:51:27,927 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/zookeeper_0, clientPort=63103, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T01:51:27,928 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63103 2024-12-09T01:51:27,928 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:27,930 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:27,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:51:27,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:51:27,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:51:27,946 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c with version=8 2024-12-09T01:51:27,947 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:41875/user/jenkins/test-data/c531f78b-4684-755c-e834-db15031f05c3/hbase-staging 2024-12-09T01:51:27,949 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:51:27,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:27,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:27,949 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:51:27,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:27,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:51:27,949 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T01:51:27,949 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:51:27,950 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33921 2024-12-09T01:51:27,951 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33921 connecting to ZooKeeper ensemble=127.0.0.1:63103 2024-12-09T01:51:27,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:339210x0, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:51:27,958 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33921-0x1007478fd9d0000 connected 2024-12-09T01:51:27,976 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:27,977 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:27,979 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:27,979 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c, hbase.cluster.distributed=false 2024-12-09T01:51:27,981 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:51:27,982 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33921 2024-12-09T01:51:27,985 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33921 2024-12-09T01:51:27,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33921 2024-12-09T01:51:27,987 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33921 2024-12-09T01:51:27,987 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33921 2024-12-09T01:51:28,003 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:51:28,003 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:28,003 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:28,003 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:51:28,003 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:28,003 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:51:28,003 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:51:28,003 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:51:28,004 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46367 2024-12-09T01:51:28,005 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46367 connecting to ZooKeeper ensemble=127.0.0.1:63103 2024-12-09T01:51:28,006 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:28,008 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:28,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:463670x0, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:51:28,014 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46367-0x1007478fd9d0001 connected 2024-12-09T01:51:28,014 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:28,015 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T01:51:28,015 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:51:28,016 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T01:51:28,018 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:51:28,018 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46367 2024-12-09T01:51:28,018 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46367 2024-12-09T01:51:28,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46367 2024-12-09T01:51:28,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46367 2024-12-09T01:51:28,019 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46367 2024-12-09T01:51:28,034 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:51:28,034 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:28,034 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:28,034 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:51:28,034 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:28,034 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:51:28,034 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:51:28,035 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:51:28,035 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41835 2024-12-09T01:51:28,036 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41835 connecting to ZooKeeper ensemble=127.0.0.1:63103 2024-12-09T01:51:28,037 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:28,038 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:28,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:418350x0, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:51:28,043 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41835-0x1007478fd9d0002 connected 2024-12-09T01:51:28,043 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:28,044 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T01:51:28,044 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:51:28,045 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
2024-12-09T01:51:28,046 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:51:28,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41835 2024-12-09T01:51:28,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41835 2024-12-09T01:51:28,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41835 2024-12-09T01:51:28,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41835 2024-12-09T01:51:28,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41835 2024-12-09T01:51:28,070 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:51:28,070 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:28,070 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:28,070 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:51:28,071 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:28,071 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:51:28,071 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:51:28,071 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:51:28,072 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41039 2024-12-09T01:51:28,073 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41039 connecting to ZooKeeper ensemble=127.0.0.1:63103 2024-12-09T01:51:28,074 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:28,076 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:28,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:410390x0, quorum=127.0.0.1:63103, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:51:28,081 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41039-0x1007478fd9d0003 connected 2024-12-09T01:51:28,081 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:28,082 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T01:51:28,083 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:51:28,083 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T01:51:28,085 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:51:28,085 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41039 2024-12-09T01:51:28,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41039 2024-12-09T01:51:28,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41039 2024-12-09T01:51:28,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41039 2024-12-09T01:51:28,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41039 2024-12-09T01:51:28,098 DEBUG [M:0;ef6f18c58dc9:33921 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef6f18c58dc9:33921 2024-12-09T01:51:28,098 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef6f18c58dc9,33921,1733709087948 2024-12-09T01:51:28,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:28,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:28,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:28,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:28,103 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33921-0x1007478fd9d0000, 
quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef6f18c58dc9,33921,1733709087948 2024-12-09T01:51:28,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T01:51:28,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T01:51:28,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T01:51:28,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,107 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T01:51:28,107 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef6f18c58dc9,33921,1733709087948 from backup master directory 2024-12-09T01:51:28,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef6f18c58dc9,33921,1733709087948 2024-12-09T01:51:28,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:28,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:28,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:28,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, 
quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:28,111 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T01:51:28,111 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef6f18c58dc9,33921,1733709087948 2024-12-09T01:51:28,118 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/hbase.id] with ID: d1f82e98-0d41-49ac-85b2-a951f958989e 2024-12-09T01:51:28,118 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/.tmp/hbase.id 2024-12-09T01:51:28,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:51:28,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:51:28,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:51:28,128 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/.tmp/hbase.id]:[hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/hbase.id] 2024-12-09T01:51:28,146 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:28,146 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T01:51:28,148 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
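The ZKWatcher/ZKUtil chatter above is ordinary ZooKeeper watch traffic: each process connects to the ensemble on client port 63103 and registers watches on znodes such as /hbase/master and /hbase/running, then reacts to NodeCreated/NodeChildrenChanged events. A minimal stand-alone sketch of the same pattern against the raw ZooKeeper client (connection string and znode paths taken from the log; the timeout and the rest are illustrative assumptions):

import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the quorum the minicluster logs (127.0.0.1:63103).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63103", 30000, event ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath()));
    // exists(path, true) registers a watch even when the znode is absent, which is
    // what the "Set watcher on znode that does not yet exist" lines correspond to.
    zk.exists("/hbase/master", true);
    zk.exists("/hbase/running", true);
    Thread.sleep(5_000);   // give NodeCreated / NodeChildrenChanged events time to arrive
    zk.close();
  }
}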
2024-12-09T01:51:28,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:51:28,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:51:28,163 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:51:28,164 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T01:51:28,164 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:51:28,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:51:28,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is 
added to blk_1073741828_1004 (size=1189) 2024-12-09T01:51:28,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:51:28,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:51:28,178 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store 2024-12-09T01:51:28,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:51:28,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:51:28,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:51:28,190 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:28,190 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:51:28,190 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:28,190 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
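The master:store descriptor printed a few entries above (column families info, proc, rs and state, each with the attributes shown) can be expressed with the public HBase client builders. The sketch below reconstructs roughly the info and proc families from those logged attributes; it is illustrative only, since the master local region builds this descriptor internally, and the rs and state families are omitted for brevity.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                                // BLOCKSIZE => 8KB
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .build();
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                     // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)                     // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)                               // BLOCKSIZE => 64KB
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }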
2024-12-09T01:51:28,190 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:51:28,190 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:28,191 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:28,191 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709088190Disabling compacts and flushes for region at 1733709088190Disabling writes for close at 1733709088190Writing region close event to WAL at 1733709088190Closed at 1733709088190 2024-12-09T01:51:28,192 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/.initializing 2024-12-09T01:51:28,192 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/WALs/ef6f18c58dc9,33921,1733709087948 2024-12-09T01:51:28,196 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C33921%2C1733709087948, suffix=, logDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/WALs/ef6f18c58dc9,33921,1733709087948, archiveDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/oldWALs, maxLogs=10 2024-12-09T01:51:28,197 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C33921%2C1733709087948.1733709088196 2024-12-09T01:51:28,207 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/WALs/ef6f18c58dc9,33921,1733709087948/ef6f18c58dc9%2C33921%2C1733709087948.1733709088196 2024-12-09T01:51:28,212 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32807:32807),(127.0.0.1/127.0.0.1:33653:33653),(127.0.0.1/127.0.0.1:42581:42581)] 2024-12-09T01:51:28,215 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:51:28,215 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:28,216 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:28,216 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:28,218 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:28,220 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T01:51:28,220 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:28,220 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:28,221 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:28,222 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T01:51:28,222 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:28,223 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:51:28,223 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:28,225 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T01:51:28,226 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:28,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:51:28,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:28,228 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T01:51:28,228 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:28,229 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:51:28,229 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:28,230 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:28,231 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:28,232 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:28,232 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:28,233 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T01:51:28,235 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:28,237 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:51:28,238 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66437682, jitterRate=-0.010001391172409058}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T01:51:28,239 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733709088216Initializing all the Stores at 1733709088217 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709088217Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709088218 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709088218Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709088218Cleaning up temporary data from old regions at 1733709088232 (+14 ms)Region opened successfully at 1733709088239 (+7 ms) 2024-12-09T01:51:28,239 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T01:51:28,244 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56f87c75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:51:28,246 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T01:51:28,246 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T01:51:28,246 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T01:51:28,246 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T01:51:28,247 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T01:51:28,247 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T01:51:28,247 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T01:51:28,251 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
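The "Region open journal" entry a few lines above is a sequence of (step, epoch-millisecond) pairs run together into one string, with "(+N ms)" giving the delta from the previous step. The small sketch below, using values copied from that journal, shows how the same information reads when laid out one step per line; the input format is inferred from the log text, not taken from an HBase API.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class RegionJournalSketch {
      public static void main(String[] args) {
        // Steps and timestamps copied from the open journal of region 1595e783b53d99cd5eef43b6debb2682.
        Map<String, Long> journal = new LinkedHashMap<>();
        journal.put("Writing region info on filesystem", 1733709088216L);
        journal.put("Initializing all the Stores", 1733709088217L);
        journal.put("Instantiating store for column family 'state'", 1733709088218L);
        journal.put("Cleaning up temporary data from old regions", 1733709088232L);
        journal.put("Region opened successfully", 1733709088239L);

        long previous = -1;
        for (Map.Entry<String, Long> step : journal.entrySet()) {
          long delta = previous < 0 ? 0 : step.getValue() - previous;
          System.out.printf("%-48s at %d (+%d ms)%n", step.getKey(), step.getValue(), delta);
          previous = step.getValue();
        }
      }
    }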
2024-12-09T01:51:28,253 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T01:51:28,254 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T01:51:28,255 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T01:51:28,256 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T01:51:28,259 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T01:51:28,259 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T01:51:28,261 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T01:51:28,262 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T01:51:28,263 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T01:51:28,265 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T01:51:28,267 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T01:51:28,269 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T01:51:28,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:28,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:28,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/running 2024-12-09T01:51:28,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:28,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,272 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef6f18c58dc9,33921,1733709087948, sessionid=0x1007478fd9d0000, setting cluster-up flag (Was=false) 2024-12-09T01:51:28,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,283 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T01:51:28,284 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,33921,1733709087948 2024-12-09T01:51:28,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,294 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T01:51:28,295 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,33921,1733709087948 2024-12-09T01:51:28,297 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T01:51:28,300 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T01:51:28,300 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T01:51:28,301 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
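The repeated "Unable to get data of znode ... because node does not exist (not necessarily an error)" lines above come from probing optional switch znodes such as /hbase/balancer and /hbase/normalizer. The sketch below shows the underlying pattern with the plain ZooKeeper client: an absent node surfaces as NoNodeException and is treated as "use the default" rather than as a failure. The connect string is the quorum reported by ZKWatcher in this run; the session timeout and class name are arbitrary illustrative choices.

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class OptionalZNodeProbeSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:63103", 30000, event -> { /* ignore session events here */ });
        byte[] data = readOptional(zk, "/hbase/balancer");
        System.out.println(data == null ? "znode absent, keeping default" : new String(data));
        zk.close();
      }

      static byte[] readOptional(ZooKeeper zk, String path) throws InterruptedException, KeeperException {
        try {
          Stat stat = new Stat();
          return zk.getData(path, false, stat);    // false: no watch for this one-shot read
        } catch (KeeperException.NoNodeException e) {
          return null;                             // "not necessarily an error", as the log puts it
        }
      }
    }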
2024-12-09T01:51:28,301 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef6f18c58dc9,33921,1733709087948 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T01:51:28,302 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:51:28,302 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:51:28,302 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:51:28,302 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:51:28,302 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef6f18c58dc9:0, corePoolSize=10, maxPoolSize=10 2024-12-09T01:51:28,303 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,303 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:51:28,303 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,305 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733709118305 2024-12-09T01:51:28,305 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T01:51:28,305 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:51:28,305 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T01:51:28,305 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T01:51:28,305 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T01:51:28,305 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T01:51:28,305 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T01:51:28,305 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T01:51:28,306 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:28,307 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T01:51:28,310 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T01:51:28,310 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T01:51:28,310 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T01:51:28,310 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T01:51:28,311 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T01:51:28,311 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T01:51:28,311 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709088311,5,FailOnTimeoutGroup] 2024-12-09T01:51:28,311 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709088311,5,FailOnTimeoutGroup] 2024-12-09T01:51:28,311 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,311 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T01:51:28,311 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,311 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
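The ChoreService lines above (LogsCleaner every 600000 ms, HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms) are periodic background tasks. The sketch below reproduces only the period/unit idea with plain java.util.concurrent, not with HBase's ChoreService; the body of the task is a stand-in comment rather than real cleaner logic.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class CleanerChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
        // Same period and unit as the LogsCleaner chore in the log: 600000 ms.
        chores.scheduleAtFixedRate(
            () -> System.out.println("LogsCleaner: scanning oldWALs for deletable files"),
            0, 600_000, TimeUnit.MILLISECONDS);
        // A real cleaner chore would walk the archive directory and consult delegate cleaners
        // (TimeToLiveLogCleaner, ReplicationLogCleaner, ...) before deleting anything.
        // The pool thread is non-daemon, so this keeps running until the JVM stops, like a chore service.
      }
    }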
2024-12-09T01:51:28,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:51:28,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:51:28,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:51:28,319 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T01:51:28,319 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c 2024-12-09T01:51:28,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:51:28,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:51:28,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:51:28,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:28,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:51:28,332 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:51:28,333 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:28,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:28,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:51:28,335 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:51:28,335 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:28,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:28,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:51:28,337 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min 
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:51:28,337 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:28,338 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:28,338 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:51:28,339 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:51:28,339 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:28,340 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:28,340 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:51:28,341 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740 2024-12-09T01:51:28,341 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740 2024-12-09T01:51:28,343 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:51:28,343 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:51:28,344 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
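The FlushLargeStoresPolicy line just above documents a simple fallback: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset, the lower bound becomes the region's memstore flush size divided by the number of column families, which is the "(32.0 M)" it prints. The arithmetic below uses 128 MB, the usual default flush size and the same figure logged earlier as flushSize=134217728, and the four families of hbase:meta (info, ns, rep_barrier, table); it is plain arithmetic, not HBase code.

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memstoreFlushSize = 134_217_728L;   // 128 MB
        int columnFamilies = 4;                  // info, ns, rep_barrier, table
        long lowerBound = memstoreFlushSize / columnFamilies;
        System.out.println(lowerBound);          // 33554432 bytes = 32 MB, i.e. flushSizeLowerBound=33554432
      }
    }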
2024-12-09T01:51:28,346 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:51:28,348 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:51:28,349 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73805163, jitterRate=0.09978263080120087}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T01:51:28,350 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733709088329Initializing all the Stores at 1733709088330 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709088330Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709088331 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709088331Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709088331Cleaning up temporary data from old regions at 1733709088343 (+12 ms)Region opened successfully at 1733709088350 (+7 ms) 2024-12-09T01:51:28,350 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:51:28,350 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:51:28,350 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:51:28,350 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:51:28,350 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:51:28,350 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:51:28,350 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709088350Disabling compacts and flushes for region at 1733709088350Disabling writes for close at 
1733709088350Writing region close event to WAL at 1733709088350Closed at 1733709088350 2024-12-09T01:51:28,353 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:51:28,353 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T01:51:28,353 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T01:51:28,355 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:51:28,356 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T01:51:28,389 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(746): ClusterId : d1f82e98-0d41-49ac-85b2-a951f958989e 2024-12-09T01:51:28,389 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.HRegionServer(746): ClusterId : d1f82e98-0d41-49ac-85b2-a951f958989e 2024-12-09T01:51:28,389 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:51:28,389 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:51:28,390 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.HRegionServer(746): ClusterId : d1f82e98-0d41-49ac-85b2-a951f958989e 2024-12-09T01:51:28,390 DEBUG [RS:1;ef6f18c58dc9:41835 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:51:28,392 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:51:28,392 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:51:28,392 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:51:28,392 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:51:28,393 DEBUG [RS:1;ef6f18c58dc9:41835 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:51:28,393 DEBUG [RS:1;ef6f18c58dc9:41835 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:51:28,396 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T01:51:28,396 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T01:51:28,396 DEBUG [RS:1;ef6f18c58dc9:41835 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T01:51:28,396 DEBUG [RS:2;ef6f18c58dc9:41039 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26f66e35, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:51:28,396 DEBUG [RS:0;ef6f18c58dc9:46367 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63b23761, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:51:28,396 DEBUG [RS:1;ef6f18c58dc9:41835 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@97146b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:51:28,409 DEBUG [RS:2;ef6f18c58dc9:41039 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;ef6f18c58dc9:41039 2024-12-09T01:51:28,409 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T01:51:28,409 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T01:51:28,409 DEBUG [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T01:51:28,410 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,33921,1733709087948 with port=41039, startcode=1733709088070 2024-12-09T01:51:28,411 DEBUG [RS:2;ef6f18c58dc9:41039 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:51:28,412 DEBUG [RS:0;ef6f18c58dc9:46367 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef6f18c58dc9:46367 2024-12-09T01:51:28,412 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T01:51:28,412 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T01:51:28,412 DEBUG [RS:0;ef6f18c58dc9:46367 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T01:51:28,413 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,33921,1733709087948 with port=46367, startcode=1733709088002 2024-12-09T01:51:28,413 DEBUG [RS:0;ef6f18c58dc9:46367 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:51:28,413 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57013, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:51:28,414 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33921 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,41039,1733709088070 2024-12-09T01:51:28,414 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33921 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,41039,1733709088070 2024-12-09T01:51:28,415 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44389, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:51:28,415 DEBUG [RS:1;ef6f18c58dc9:41835 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;ef6f18c58dc9:41835 2024-12-09T01:51:28,415 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T01:51:28,415 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T01:51:28,415 DEBUG [RS:1;ef6f18c58dc9:41835 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T01:51:28,416 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33921 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,46367,1733709088002 2024-12-09T01:51:28,416 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33921 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,46367,1733709088002 2024-12-09T01:51:28,417 DEBUG [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c 2024-12-09T01:51:28,417 DEBUG [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45333 2024-12-09T01:51:28,417 DEBUG [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:51:28,417 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,33921,1733709087948 with port=41835, startcode=1733709088034 2024-12-09T01:51:28,417 DEBUG [RS:1;ef6f18c58dc9:41835 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:51:28,419 DEBUG [RS:0;ef6f18c58dc9:46367 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c 2024-12-09T01:51:28,419 DEBUG [RS:0;ef6f18c58dc9:46367 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45333 2024-12-09T01:51:28,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:51:28,419 DEBUG [RS:0;ef6f18c58dc9:46367 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:51:28,419 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34813, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:51:28,419 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33921 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,41835,1733709088034 2024-12-09T01:51:28,419 DEBUG [RS:2;ef6f18c58dc9:41039 {}] zookeeper.ZKUtil(111): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,41039,1733709088070 2024-12-09T01:51:28,420 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33921 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,41835,1733709088034 2024-12-09T01:51:28,420 WARN [RS:2;ef6f18c58dc9:41039 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T01:51:28,420 INFO [RS:2;ef6f18c58dc9:41039 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:51:28,420 DEBUG [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/WALs/ef6f18c58dc9,41039,1733709088070 2024-12-09T01:51:28,422 DEBUG [RS:1;ef6f18c58dc9:41835 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c 2024-12-09T01:51:28,422 DEBUG [RS:1;ef6f18c58dc9:41835 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45333 2024-12-09T01:51:28,422 DEBUG [RS:1;ef6f18c58dc9:41835 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:51:28,427 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,41039,1733709088070] 2024-12-09T01:51:28,427 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,46367,1733709088002] 2024-12-09T01:51:28,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:51:28,429 DEBUG [RS:1;ef6f18c58dc9:41835 {}] zookeeper.ZKUtil(111): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,41835,1733709088034 2024-12-09T01:51:28,429 WARN [RS:1;ef6f18c58dc9:41835 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T01:51:28,429 INFO [RS:1;ef6f18c58dc9:41835 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:51:28,429 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,41835,1733709088034] 2024-12-09T01:51:28,429 DEBUG [RS:1;ef6f18c58dc9:41835 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/WALs/ef6f18c58dc9,41835,1733709088034 2024-12-09T01:51:28,429 DEBUG [RS:0;ef6f18c58dc9:46367 {}] zookeeper.ZKUtil(111): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,46367,1733709088002 2024-12-09T01:51:28,429 WARN [RS:0;ef6f18c58dc9:46367 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T01:51:28,430 INFO [RS:0;ef6f18c58dc9:46367 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:51:28,430 DEBUG [RS:0;ef6f18c58dc9:46367 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/WALs/ef6f18c58dc9,46367,1733709088002 2024-12-09T01:51:28,430 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:51:28,435 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:51:28,442 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:51:28,442 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:51:28,442 INFO [RS:2;ef6f18c58dc9:41039 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:51:28,442 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,442 INFO [RS:1;ef6f18c58dc9:41835 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:51:28,443 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T01:51:28,445 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:51:28,445 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:51:28,446 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:51:28,446 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:51:28,446 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:51:28,446 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,446 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,447 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,447 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,447 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,447 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,447 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,447 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,447 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,447 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,447 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,447 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,447 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:51:28,447 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:51:28,447 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service 
name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,447 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,447 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,447 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,448 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,448 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,448 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,448 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,448 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,448 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,448 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,448 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:28,448 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,448 DEBUG [RS:2;ef6f18c58dc9:41039 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:28,448 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:28,448 DEBUG [RS:1;ef6f18c58dc9:41835 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:28,449 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:51:28,452 INFO [RS:0;ef6f18c58dc9:46367 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, 
tuning period: 60000 ms 2024-12-09T01:51:28,452 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,454 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,454 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,454 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,454 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,454 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,454 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,41039,1733709088070-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:51:28,454 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:51:28,455 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,455 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,455 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,455 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,455 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,455 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:51:28,455 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,41835,1733709088034-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:51:28,455 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T01:51:28,456 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,456 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,456 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,456 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,456 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,456 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:51:28,456 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,456 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,456 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,456 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,456 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,456 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:28,457 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:28,457 DEBUG [RS:0;ef6f18c58dc9:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:28,458 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,458 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,458 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,458 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-09T01:51:28,458 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,458 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,46367,1733709088002-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:51:28,470 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:51:28,470 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,41039,1733709088070-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,470 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,470 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.Replication(171): ef6f18c58dc9,41039,1733709088070 started 2024-12-09T01:51:28,472 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:51:28,472 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,46367,1733709088002-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,472 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,472 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.Replication(171): ef6f18c58dc9,46367,1733709088002 started 2024-12-09T01:51:28,477 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:51:28,477 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,41835,1733709088034-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,477 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,477 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.Replication(171): ef6f18c58dc9,41835,1733709088034 started 2024-12-09T01:51:28,485 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T01:51:28,485 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,41039,1733709088070, RpcServer on ef6f18c58dc9/172.17.0.2:41039, sessionid=0x1007478fd9d0003 2024-12-09T01:51:28,485 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:51:28,485 DEBUG [RS:2;ef6f18c58dc9:41039 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,41039,1733709088070 2024-12-09T01:51:28,485 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,41039,1733709088070' 2024-12-09T01:51:28,485 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:51:28,486 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:51:28,487 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:51:28,487 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:51:28,487 DEBUG [RS:2;ef6f18c58dc9:41039 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,41039,1733709088070 2024-12-09T01:51:28,487 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,41039,1733709088070' 2024-12-09T01:51:28,487 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:51:28,487 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,487 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,46367,1733709088002, RpcServer on ef6f18c58dc9/172.17.0.2:46367, sessionid=0x1007478fd9d0001 2024-12-09T01:51:28,487 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:51:28,487 DEBUG [RS:0;ef6f18c58dc9:46367 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,46367,1733709088002 2024-12-09T01:51:28,487 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:51:28,487 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,46367,1733709088002' 2024-12-09T01:51:28,487 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:51:28,488 DEBUG [RS:2;ef6f18c58dc9:41039 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:51:28,488 INFO [RS:2;ef6f18c58dc9:41039 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:51:28,488 INFO [RS:2;ef6f18c58dc9:41039 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-09T01:51:28,488 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:51:28,489 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:51:28,489 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:51:28,489 DEBUG [RS:0;ef6f18c58dc9:46367 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,46367,1733709088002 2024-12-09T01:51:28,489 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,46367,1733709088002' 2024-12-09T01:51:28,489 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:51:28,489 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:51:28,490 DEBUG [RS:0;ef6f18c58dc9:46367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:51:28,490 INFO [RS:0;ef6f18c58dc9:46367 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:51:28,490 INFO [RS:0;ef6f18c58dc9:46367 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T01:51:28,499 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:28,499 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,41835,1733709088034, RpcServer on ef6f18c58dc9/172.17.0.2:41835, sessionid=0x1007478fd9d0002 2024-12-09T01:51:28,499 DEBUG [RS:1;ef6f18c58dc9:41835 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:51:28,499 DEBUG [RS:1;ef6f18c58dc9:41835 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,41835,1733709088034 2024-12-09T01:51:28,499 DEBUG [RS:1;ef6f18c58dc9:41835 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,41835,1733709088034' 2024-12-09T01:51:28,499 DEBUG [RS:1;ef6f18c58dc9:41835 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:51:28,500 DEBUG [RS:1;ef6f18c58dc9:41835 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:51:28,500 DEBUG [RS:1;ef6f18c58dc9:41835 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:51:28,500 DEBUG [RS:1;ef6f18c58dc9:41835 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:51:28,501 DEBUG [RS:1;ef6f18c58dc9:41835 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,41835,1733709088034 2024-12-09T01:51:28,501 DEBUG [RS:1;ef6f18c58dc9:41835 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,41835,1733709088034' 2024-12-09T01:51:28,501 DEBUG [RS:1;ef6f18c58dc9:41835 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:51:28,501 DEBUG 
[RS:1;ef6f18c58dc9:41835 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:51:28,501 DEBUG [RS:1;ef6f18c58dc9:41835 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:51:28,502 INFO [RS:1;ef6f18c58dc9:41835 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:51:28,502 INFO [RS:1;ef6f18c58dc9:41835 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T01:51:28,507 WARN [ef6f18c58dc9:33921 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T01:51:28,591 INFO [RS:2;ef6f18c58dc9:41039 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C41039%2C1733709088070, suffix=, logDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/WALs/ef6f18c58dc9,41039,1733709088070, archiveDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/oldWALs, maxLogs=32 2024-12-09T01:51:28,592 INFO [RS:0;ef6f18c58dc9:46367 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C46367%2C1733709088002, suffix=, logDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/WALs/ef6f18c58dc9,46367,1733709088002, archiveDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/oldWALs, maxLogs=32 2024-12-09T01:51:28,593 INFO [RS:2;ef6f18c58dc9:41039 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C41039%2C1733709088070.1733709088593 2024-12-09T01:51:28,594 INFO [RS:0;ef6f18c58dc9:46367 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C46367%2C1733709088002.1733709088594 2024-12-09T01:51:28,602 INFO [RS:0;ef6f18c58dc9:46367 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/WALs/ef6f18c58dc9,46367,1733709088002/ef6f18c58dc9%2C46367%2C1733709088002.1733709088594 2024-12-09T01:51:28,604 INFO [RS:1;ef6f18c58dc9:41835 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C41835%2C1733709088034, suffix=, logDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/WALs/ef6f18c58dc9,41835,1733709088034, archiveDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/oldWALs, maxLogs=32 2024-12-09T01:51:28,605 INFO [RS:1;ef6f18c58dc9:41835 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C41835%2C1733709088034.1733709088605 2024-12-09T01:51:28,609 INFO [RS:2;ef6f18c58dc9:41039 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/WALs/ef6f18c58dc9,41039,1733709088070/ef6f18c58dc9%2C41039%2C1733709088070.1733709088593 2024-12-09T01:51:28,609 DEBUG [RS:0;ef6f18c58dc9:46367 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32807:32807),(127.0.0.1/127.0.0.1:33653:33653),(127.0.0.1/127.0.0.1:42581:42581)] 2024-12-09T01:51:28,612 DEBUG [RS:2;ef6f18c58dc9:41039 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33653:33653),(127.0.0.1/127.0.0.1:32807:32807),(127.0.0.1/127.0.0.1:42581:42581)] 2024-12-09T01:51:28,617 INFO [RS:1;ef6f18c58dc9:41835 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/WALs/ef6f18c58dc9,41835,1733709088034/ef6f18c58dc9%2C41835%2C1733709088034.1733709088605 2024-12-09T01:51:28,618 DEBUG [RS:1;ef6f18c58dc9:41835 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32807:32807),(127.0.0.1/127.0.0.1:42581:42581),(127.0.0.1/127.0.0.1:33653:33653)] 2024-12-09T01:51:28,757 DEBUG [ef6f18c58dc9:33921 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T01:51:28,757 DEBUG [ef6f18c58dc9:33921 {}] balancer.BalancerClusterState(204): Hosts are {ef6f18c58dc9=0} racks are {/default-rack=0} 2024-12-09T01:51:28,760 DEBUG [ef6f18c58dc9:33921 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T01:51:28,760 DEBUG [ef6f18c58dc9:33921 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T01:51:28,760 DEBUG [ef6f18c58dc9:33921 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T01:51:28,760 DEBUG [ef6f18c58dc9:33921 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T01:51:28,760 DEBUG [ef6f18c58dc9:33921 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T01:51:28,760 DEBUG [ef6f18c58dc9:33921 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T01:51:28,760 INFO [ef6f18c58dc9:33921 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T01:51:28,760 INFO [ef6f18c58dc9:33921 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T01:51:28,760 INFO [ef6f18c58dc9:33921 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T01:51:28,760 DEBUG [ef6f18c58dc9:33921 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T01:51:28,761 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef6f18c58dc9,41039,1733709088070 2024-12-09T01:51:28,763 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,41039,1733709088070, state=OPENING 2024-12-09T01:51:28,765 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T01:51:28,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:28,768 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, 
state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:51:28,768 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:28,768 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,41039,1733709088070}] 2024-12-09T01:51:28,768 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:28,768 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:28,768 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:28,922 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T01:51:28,924 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39989, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T01:51:28,929 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T01:51:28,929 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:51:28,932 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C41039%2C1733709088070.meta, suffix=.meta, logDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/WALs/ef6f18c58dc9,41039,1733709088070, archiveDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/oldWALs, maxLogs=32 2024-12-09T01:51:28,933 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C41039%2C1733709088070.meta.1733709088933.meta 2024-12-09T01:51:28,943 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/WALs/ef6f18c58dc9,41039,1733709088070/ef6f18c58dc9%2C41039%2C1733709088070.meta.1733709088933.meta 2024-12-09T01:51:28,947 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32807:32807),(127.0.0.1/127.0.0.1:42581:42581),(127.0.0.1/127.0.0.1:33653:33653)] 2024-12-09T01:51:28,950 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:51:28,950 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with 
path null and priority 536870911 2024-12-09T01:51:28,950 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T01:51:28,950 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T01:51:28,950 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T01:51:28,950 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:28,951 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T01:51:28,951 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T01:51:28,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:51:28,954 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:51:28,954 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:28,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:28,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:51:28,956 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:51:28,956 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:28,957 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:28,957 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:51:28,958 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:51:28,958 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:28,958 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:28,958 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:51:28,959 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 
columnFamilyName table 2024-12-09T01:51:28,959 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:28,960 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:28,960 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:51:28,961 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740 2024-12-09T01:51:28,963 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740 2024-12-09T01:51:28,964 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:51:28,964 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:51:28,965 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-09T01:51:28,967 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:51:28,968 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63905627, jitterRate=-0.047731950879096985}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T01:51:28,968 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T01:51:28,969 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733709088951Writing region info on filesystem at 1733709088951Initializing all the Stores at 1733709088952 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709088952Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709088953 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709088953Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709088953Cleaning up temporary data from old regions at 1733709088964 (+11 ms)Running coprocessor post-open hooks at 1733709088968 (+4 ms)Region opened successfully at 1733709088969 (+1 ms) 2024-12-09T01:51:28,971 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733709088922 2024-12-09T01:51:28,974 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T01:51:28,974 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T01:51:28,976 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=ef6f18c58dc9,41039,1733709088070 2024-12-09T01:51:28,977 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,41039,1733709088070, state=OPEN 2024-12-09T01:51:28,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:51:28,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:51:28,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:51:28,979 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,41039,1733709088070 2024-12-09T01:51:28,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:51:28,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:28,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:28,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:28,980 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:28,984 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T01:51:28,984 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,41039,1733709088070 in 211 msec 2024-12-09T01:51:28,989 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T01:51:28,989 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 631 msec 2024-12-09T01:51:28,990 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:51:28,990 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T01:51:28,992 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:51:28,992 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=ef6f18c58dc9,41039,1733709088070, seqNum=-1] 2024-12-09T01:51:28,992 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:51:28,994 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35913, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:51:29,003 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 702 msec 2024-12-09T01:51:29,003 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733709089003, completionTime=-1 2024-12-09T01:51:29,003 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T01:51:29,003 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T01:51:29,006 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T01:51:29,006 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733709149006 2024-12-09T01:51:29,006 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733709209006 2024-12-09T01:51:29,006 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-09T01:51:29,006 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33921,1733709087948-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:29,006 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33921,1733709087948-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:29,007 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33921,1733709087948-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:29,007 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef6f18c58dc9:33921, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:29,007 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:29,007 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
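The master only completes startup once ServerManager sees the expected minimum of three region servers, which is what a three-slave minicluster provides. A hedged sketch of the setup side, using the HBaseTestingUtil class that appears in the tearDown stack traces further down this log (startMiniCluster(int) is assumed to be the matching start call for this version):

    // Sketch under the assumption that UTIL drives the cluster seen in this log.
    HBaseTestingUtil UTIL = new HBaseTestingUtil();
    UTIL.startMiniCluster(3);   // master then logs: expected min=3 server(s), max=3 server(s)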
2024-12-09T01:51:29,009 DEBUG [master/ef6f18c58dc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T01:51:29,012 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.901sec 2024-12-09T01:51:29,012 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T01:51:29,012 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T01:51:29,012 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T01:51:29,012 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T01:51:29,012 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T01:51:29,012 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33921,1733709087948-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:51:29,012 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33921,1733709087948-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T01:51:29,015 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T01:51:29,015 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T01:51:29,015 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33921,1733709087948-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
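Quota support, the slow/large request system table, and the WAL event tracker are all reported as disabled, so their chores and system tables are skipped for this run. If a test needed quotas, the usual switch is the hbase.quota.enabled configuration key set before the cluster starts; a hedged one-liner:

    // Assumed configuration switch; MasterQuotaManager would then initialize
    // instead of logging "Quota support disabled".
    conf.setBoolean("hbase.quota.enabled", true);  // conf: the HBaseConfiguration used to start the cluster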
2024-12-09T01:51:29,089 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fa1244, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:51:29,089 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ef6f18c58dc9,33921,-1 for getting cluster id 2024-12-09T01:51:29,089 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T01:51:29,091 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd1f82e98-0d41-49ac-85b2-a951f958989e' 2024-12-09T01:51:29,091 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T01:51:29,091 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d1f82e98-0d41-49ac-85b2-a951f958989e" 2024-12-09T01:51:29,092 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36082c61, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:51:29,092 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ef6f18c58dc9,33921,-1] 2024-12-09T01:51:29,092 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T01:51:29,093 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:29,094 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34916, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T01:51:29,095 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@622046a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:51:29,095 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:51:29,097 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,41039,1733709088070, seqNum=-1] 2024-12-09T01:51:29,097 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:51:29,099 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40614, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:51:29,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=ef6f18c58dc9,33921,1733709087948 2024-12-09T01:51:29,101 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T01:51:29,102 DEBUG 
[RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is ef6f18c58dc9,33921,1733709087948 2024-12-09T01:51:29,102 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@f53ea36 2024-12-09T01:51:29,103 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T01:51:29,104 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34922, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T01:51:29,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33921 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:51:29,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33921 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-09T01:51:29,109 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T01:51:29,109 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:29,109 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33921 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-09T01:51:29,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33921 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:51:29,111 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T01:51:29,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741837_1013 (size=392) 2024-12-09T01:51:29,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741837_1013 (size=392) 2024-12-09T01:51:29,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741837_1013 (size=392) 2024-12-09T01:51:29,125 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5d197a5f0c0c71f42a66cb8313074619, NAME => 'TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c 2024-12-09T01:51:29,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741838_1014 (size=51) 2024-12-09T01:51:29,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741838_1014 (size=51) 2024-12-09T01:51:29,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741838_1014 (size=51) 2024-12-09T01:51:29,137 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:29,137 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 5d197a5f0c0c71f42a66cb8313074619, disabling compactions & flushes 2024-12-09T01:51:29,137 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 2024-12-09T01:51:29,137 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 2024-12-09T01:51:29,137 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. after waiting 0 ms 2024-12-09T01:51:29,137 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 2024-12-09T01:51:29,137 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 2024-12-09T01:51:29,137 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5d197a5f0c0c71f42a66cb8313074619: Waiting for close lock at 1733709089137Disabling compacts and flushes for region at 1733709089137Disabling writes for close at 1733709089137Writing region close event to WAL at 1733709089137Closed at 1733709089137 2024-12-09T01:51:29,139 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T01:51:29,139 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733709089139"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733709089139"}]},"ts":"1733709089139"} 2024-12-09T01:51:29,143 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
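The CreateTableProcedure above originates from a client createTable call describing a single-family table (REGION_REPLICATION => '1', family cf with VERSIONS => '1'). A minimal client-side sketch that would produce an equivalent descriptor with the standard builder API; connection handling is compressed and conf is assumed to point at this cluster:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setRegionReplication(1)                                     // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                                       // VERSIONS => '1'
              .build())
          .build());
    }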
2024-12-09T01:51:29,144 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T01:51:29,145 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733709089145"}]},"ts":"1733709089145"} 2024-12-09T01:51:29,147 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-09T01:51:29,148 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {ef6f18c58dc9=0} racks are {/default-rack=0} 2024-12-09T01:51:29,149 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T01:51:29,149 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T01:51:29,149 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T01:51:29,149 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T01:51:29,149 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T01:51:29,149 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T01:51:29,149 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T01:51:29,149 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T01:51:29,149 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T01:51:29,149 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T01:51:29,149 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5d197a5f0c0c71f42a66cb8313074619, ASSIGN}] 2024-12-09T01:51:29,152 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5d197a5f0c0c71f42a66cb8313074619, ASSIGN 2024-12-09T01:51:29,153 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5d197a5f0c0c71f42a66cb8313074619, ASSIGN; state=OFFLINE, location=ef6f18c58dc9,41039,1733709088070; forceNewPlan=false, retain=false 2024-12-09T01:51:29,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33921 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:51:29,304 INFO [ef6f18c58dc9:33921 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
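ASSIGN places the single region on one of the three servers; the entries below show it opening on ef6f18c58dc9,41039, the same server hosting hbase:meta. A hedged client-side check of where the region landed, matching the AsyncNonMetaRegionLocator output that appears later in this log:

    // conn is the Connection from the createTable sketch above; imports as there,
    // plus org.apache.hadoop.hbase.HRegionLocation.
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row"));
      System.out.println(loc.getServerName());  // ef6f18c58dc9,41039,1733709088070 per the log
    }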
2024-12-09T01:51:29,304 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5d197a5f0c0c71f42a66cb8313074619, regionState=OPENING, regionLocation=ef6f18c58dc9,41039,1733709088070 2024-12-09T01:51:29,309 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5d197a5f0c0c71f42a66cb8313074619, ASSIGN because future has completed 2024-12-09T01:51:29,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5d197a5f0c0c71f42a66cb8313074619, server=ef6f18c58dc9,41039,1733709088070}] 2024-12-09T01:51:29,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33921 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:51:29,468 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 2024-12-09T01:51:29,468 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5d197a5f0c0c71f42a66cb8313074619, NAME => 'TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:51:29,469 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 5d197a5f0c0c71f42a66cb8313074619 2024-12-09T01:51:29,469 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:29,469 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5d197a5f0c0c71f42a66cb8313074619 2024-12-09T01:51:29,469 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5d197a5f0c0c71f42a66cb8313074619 2024-12-09T01:51:29,471 INFO [StoreOpener-5d197a5f0c0c71f42a66cb8313074619-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5d197a5f0c0c71f42a66cb8313074619 2024-12-09T01:51:29,472 INFO [StoreOpener-5d197a5f0c0c71f42a66cb8313074619-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5d197a5f0c0c71f42a66cb8313074619 columnFamilyName cf 2024-12-09T01:51:29,472 DEBUG [StoreOpener-5d197a5f0c0c71f42a66cb8313074619-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:29,473 INFO [StoreOpener-5d197a5f0c0c71f42a66cb8313074619-1 {}] regionserver.HStore(327): Store=5d197a5f0c0c71f42a66cb8313074619/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:51:29,473 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5d197a5f0c0c71f42a66cb8313074619 2024-12-09T01:51:29,474 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/default/TestHBaseWalOnEC/5d197a5f0c0c71f42a66cb8313074619 2024-12-09T01:51:29,474 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/default/TestHBaseWalOnEC/5d197a5f0c0c71f42a66cb8313074619 2024-12-09T01:51:29,475 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5d197a5f0c0c71f42a66cb8313074619 2024-12-09T01:51:29,475 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5d197a5f0c0c71f42a66cb8313074619 2024-12-09T01:51:29,476 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5d197a5f0c0c71f42a66cb8313074619 2024-12-09T01:51:29,479 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/default/TestHBaseWalOnEC/5d197a5f0c0c71f42a66cb8313074619/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:51:29,479 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5d197a5f0c0c71f42a66cb8313074619; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68473308, jitterRate=0.02033179998397827}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T01:51:29,479 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5d197a5f0c0c71f42a66cb8313074619 2024-12-09T01:51:29,480 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5d197a5f0c0c71f42a66cb8313074619: Running coprocessor pre-open hook at 1733709089469Writing region info on filesystem at 1733709089469Initializing all the Stores at 1733709089470 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709089470Cleaning up temporary data from old regions at 1733709089475 (+5 ms)Running coprocessor post-open hooks at 1733709089480 (+5 ms)Region opened successfully at 1733709089480 2024-12-09T01:51:29,482 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619., pid=6, masterSystemTime=1733709089463 2024-12-09T01:51:29,485 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 2024-12-09T01:51:29,485 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 2024-12-09T01:51:29,486 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5d197a5f0c0c71f42a66cb8313074619, regionState=OPEN, openSeqNum=2, regionLocation=ef6f18c58dc9,41039,1733709088070 2024-12-09T01:51:29,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5d197a5f0c0c71f42a66cb8313074619, server=ef6f18c58dc9,41039,1733709088070 because future has completed 2024-12-09T01:51:29,496 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T01:51:29,496 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5d197a5f0c0c71f42a66cb8313074619, server=ef6f18c58dc9,41039,1733709088070 in 183 msec 2024-12-09T01:51:29,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T01:51:29,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5d197a5f0c0c71f42a66cb8313074619, ASSIGN in 347 msec 2024-12-09T01:51:29,501 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T01:51:29,502 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733709089502"}]},"ts":"1733709089502"} 2024-12-09T01:51:29,505 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-09T01:51:29,507 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T01:51:29,510 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 402 msec 2024-12-09T01:51:29,742 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33921 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:51:29,742 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T01:51:29,742 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-09T01:51:29,743 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T01:51:29,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-09T01:51:29,747 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T01:51:29,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-09T01:51:29,751 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619., hostname=ef6f18c58dc9,41039,1733709088070, seqNum=2] 2024-12-09T01:51:29,755 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33921 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-09T01:51:29,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33921 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-09T01:51:29,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33921 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T01:51:29,759 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-09T01:51:29,760 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T01:51:29,760 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T01:51:29,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33921 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T01:51:29,902 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T01:51:29,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:29,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41039 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T01:51:29,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 2024-12-09T01:51:29,916 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 5d197a5f0c0c71f42a66cb8313074619 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-09T01:51:29,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/default/TestHBaseWalOnEC/5d197a5f0c0c71f42a66cb8313074619/.tmp/cf/5822d42584ed4ede99f7b26a7f0ceaf6 is 36, key is row/cf:cq/1733709089752/Put/seqid=0 2024-12-09T01:51:29,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741839_1015 (size=4787) 2024-12-09T01:51:29,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741839_1015 (size=4787) 2024-12-09T01:51:29,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741839_1015 (size=4787) 2024-12-09T01:51:29,952 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/default/TestHBaseWalOnEC/5d197a5f0c0c71f42a66cb8313074619/.tmp/cf/5822d42584ed4ede99f7b26a7f0ceaf6 2024-12-09T01:51:29,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/default/TestHBaseWalOnEC/5d197a5f0c0c71f42a66cb8313074619/.tmp/cf/5822d42584ed4ede99f7b26a7f0ceaf6 as hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/default/TestHBaseWalOnEC/5d197a5f0c0c71f42a66cb8313074619/cf/5822d42584ed4ede99f7b26a7f0ceaf6 2024-12-09T01:51:29,971 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/default/TestHBaseWalOnEC/5d197a5f0c0c71f42a66cb8313074619/cf/5822d42584ed4ede99f7b26a7f0ceaf6, entries=1, sequenceid=5, filesize=4.7 K 2024-12-09T01:51:29,972 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:29,972 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:29,973 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 5d197a5f0c0c71f42a66cb8313074619 in 57ms, sequenceid=5, compaction requested=false 2024-12-09T01:51:29,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 5d197a5f0c0c71f42a66cb8313074619: 2024-12-09T01:51:29,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 2024-12-09T01:51:29,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T01:51:29,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33921 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T01:51:29,981 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T01:51:29,981 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 217 msec 2024-12-09T01:51:29,986 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 228 msec 2024-12-09T01:51:30,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33921 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T01:51:30,072 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T01:51:30,077 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T01:51:30,077 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
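The flush sequence above persisted a single ~32-byte cell (key row/cf:cq) from the memstore into one ~4.7 K HFile at sequenceid=5. A minimal sketch of the client calls that drive it; the cell value itself is not shown in the log, so the one below is a placeholder:

    // conn/admin as in the earlier sketches; imports omitted for brevity.
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = conn.getTable(tn)) {
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));  // placeholder value
    }
    admin.flush(tn);  // triggers the FlushTableProcedure -> FlushRegionProcedure chain logged above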
2024-12-09T01:51:30,077 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:30,077 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
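The call stack above is simply the test's tearDown path: close the shared async connection, then shut the minicluster down via HBaseTestingUtil.shutdownMiniCluster (TestHBaseWalOnEC.java:101 in the trace). Expressed as a JUnit hook, it amounts to the hedged sketch below (shown as an @After method, matching the per-test RunAfters frame in the trace):

    import org.junit.After;

    @After
    public void tearDown() throws Exception {
      // UTIL: the shared HBaseTestingUtil assumed to have started this minicluster.
      UTIL.shutdownMiniCluster();  // closes the cluster connection, then stops master and region servers
    }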
2024-12-09T01:51:30,077 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:30,078 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T01:51:30,078 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T01:51:30,078 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=821721347, stopped=false 2024-12-09T01:51:30,078 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef6f18c58dc9,33921,1733709087948 2024-12-09T01:51:30,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:30,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:30,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:30,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:30,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:30,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:30,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:30,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:30,081 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:51:30,081 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:30,081 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:30,081 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
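Shutdown is broadcast through ZooKeeper: the master deletes /hbase/running, and each watcher above receives a NodeDeleted event for that path and begins stopping. The same znode can be observed with a plain ZooKeeper client; a hedged sketch against the quorum address taken from the log (exception handling omitted):

    import org.apache.zookeeper.ZooKeeper;

    ZooKeeper zk = new ZooKeeper("127.0.0.1:63103", 30000,
        event -> System.out.println(event.getType() + " " + event.getPath()));
    // A null result means /hbase/running is gone, i.e. the cluster is shut down or shutting down.
    System.out.println(zk.exists("/hbase/running", true));
    zk.close();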
2024-12-09T01:51:30,081 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:30,082 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:30,082 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:30,082 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:30,083 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,46367,1733709088002' ***** 2024-12-09T01:51:30,083 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:51:30,083 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,41835,1733709088034' ***** 2024-12-09T01:51:30,083 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:51:30,083 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,41039,1733709088070' ***** 2024-12-09T01:51:30,083 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:51:30,083 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:51:30,083 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:51:30,083 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:51:30,083 INFO [RS:0;ef6f18c58dc9:46367 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:51:30,083 INFO [RS:1;ef6f18c58dc9:41835 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:51:30,083 INFO [RS:2;ef6f18c58dc9:41039 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:51:30,084 INFO [RS:0;ef6f18c58dc9:46367 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:51:30,084 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:51:30,084 INFO [RS:2;ef6f18c58dc9:41039 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:51:30,084 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:51:30,084 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,46367,1733709088002 2024-12-09T01:51:30,084 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(3091): Received CLOSE for 5d197a5f0c0c71f42a66cb8313074619 2024-12-09T01:51:30,084 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:51:30,085 INFO [RS:0;ef6f18c58dc9:46367 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef6f18c58dc9:46367. 
2024-12-09T01:51:30,085 DEBUG [RS:0;ef6f18c58dc9:46367 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:30,085 DEBUG [RS:0;ef6f18c58dc9:46367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:30,085 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,41039,1733709088070 2024-12-09T01:51:30,085 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:51:30,085 INFO [RS:2;ef6f18c58dc9:41039 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;ef6f18c58dc9:41039. 
2024-12-09T01:51:30,085 DEBUG [RS:2;ef6f18c58dc9:41039 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:30,084 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:51:30,085 DEBUG [RS:2;ef6f18c58dc9:41039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:30,085 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T01:51:30,085 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T01:51:30,085 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T01:51:30,085 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,46367,1733709088002; all regions closed. 2024-12-09T01:51:30,086 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5d197a5f0c0c71f42a66cb8313074619, disabling compactions & flushes 2024-12-09T01:51:30,086 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T01:51:30,086 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 2024-12-09T01:51:30,086 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 2024-12-09T01:51:30,084 INFO [RS:1;ef6f18c58dc9:41835 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:51:30,086 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 
after waiting 0 ms 2024-12-09T01:51:30,086 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,41835,1733709088034 2024-12-09T01:51:30,086 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 2024-12-09T01:51:30,086 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:51:30,086 INFO [RS:1;ef6f18c58dc9:41835 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;ef6f18c58dc9:41835. 2024-12-09T01:51:30,086 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T01:51:30,086 DEBUG [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 5d197a5f0c0c71f42a66cb8313074619=TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619.} 2024-12-09T01:51:30,086 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,086 DEBUG [RS:1;ef6f18c58dc9:41835 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:30,086 DEBUG [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5d197a5f0c0c71f42a66cb8313074619 2024-12-09T01:51:30,086 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:51:30,086 DEBUG [RS:1;ef6f18c58dc9:41835 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:30,086 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,086 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:51:30,086 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:51:30,086 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,41835,1733709088034; all 
regions closed. 2024-12-09T01:51:30,086 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:51:30,086 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:51:30,086 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,087 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,087 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-09T01:51:30,087 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,089 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,089 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,089 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,089 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,090 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741834_1010 (size=93) 2024-12-09T01:51:30,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741834_1010 (size=93) 2024-12-09T01:51:30,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741834_1010 (size=93) 2024-12-09T01:51:30,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741835_1011 (size=93) 2024-12-09T01:51:30,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741835_1011 (size=93) 2024-12-09T01:51:30,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741835_1011 (size=93) 2024-12-09T01:51:30,098 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/default/TestHBaseWalOnEC/5d197a5f0c0c71f42a66cb8313074619/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T01:51:30,098 DEBUG [RS:0;ef6f18c58dc9:46367 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/oldWALs 2024-12-09T01:51:30,098 INFO [RS:0;ef6f18c58dc9:46367 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C46367%2C1733709088002:(num 1733709088594) 2024-12-09T01:51:30,098 DEBUG [RS:0;ef6f18c58dc9:46367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:30,098 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:30,098 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:51:30,099 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T01:51:30,099 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T01:51:30,099 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T01:51:30,099 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T01:51:30,099 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T01:51:30,099 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 2024-12-09T01:51:30,099 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:51:30,099 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5d197a5f0c0c71f42a66cb8313074619: Waiting for close lock at 1733709090085Running coprocessor pre-close hooks at 1733709090085Disabling compacts and flushes for region at 1733709090085Disabling writes for close at 1733709090086 (+1 ms)Writing region close event to WAL at 1733709090088 (+2 ms)Running coprocessor post-close hooks at 1733709090099 (+11 ms)Closed at 1733709090099 2024-12-09T01:51:30,099 INFO [RS:0;ef6f18c58dc9:46367 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46367 2024-12-09T01:51:30,099 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619. 
2024-12-09T01:51:30,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,46367,1733709088002 2024-12-09T01:51:30,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:51:30,102 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:51:30,104 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,46367,1733709088002] 2024-12-09T01:51:30,106 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,46367,1733709088002 already deleted, retry=false 2024-12-09T01:51:30,106 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,46367,1733709088002 expired; onlineServers=2 2024-12-09T01:51:30,112 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/.tmp/info/2a18f01fec154ba3a8e282efa3fc7918 is 153, key is TestHBaseWalOnEC,,1733709089105.5d197a5f0c0c71f42a66cb8313074619./info:regioninfo/1733709089486/Put/seqid=0 2024-12-09T01:51:30,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741840_1016 (size=6637) 2024-12-09T01:51:30,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741840_1016 (size=6637) 2024-12-09T01:51:30,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741840_1016 (size=6637) 2024-12-09T01:51:30,121 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/.tmp/info/2a18f01fec154ba3a8e282efa3fc7918 2024-12-09T01:51:30,145 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/.tmp/ns/b5c07b32967d4393ab1761de98822ebe is 43, key is default/ns:d/1733709088995/Put/seqid=0 2024-12-09T01:51:30,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741841_1017 (size=5153) 2024-12-09T01:51:30,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741841_1017 (size=5153) 2024-12-09T01:51:30,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741841_1017 (size=5153) 2024-12-09T01:51:30,154 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/.tmp/ns/b5c07b32967d4393ab1761de98822ebe 2024-12-09T01:51:30,156 INFO [regionserver/ef6f18c58dc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:30,157 INFO [regionserver/ef6f18c58dc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:30,159 INFO [regionserver/ef6f18c58dc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:30,180 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/.tmp/table/6ac82de4be3f484eb90115f411a08920 is 52, key is TestHBaseWalOnEC/table:state/1733709089502/Put/seqid=0 2024-12-09T01:51:30,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741842_1018 (size=5249) 2024-12-09T01:51:30,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741842_1018 (size=5249) 2024-12-09T01:51:30,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741842_1018 (size=5249) 2024-12-09T01:51:30,192 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/.tmp/table/6ac82de4be3f484eb90115f411a08920 2024-12-09T01:51:30,201 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/.tmp/info/2a18f01fec154ba3a8e282efa3fc7918 as hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/info/2a18f01fec154ba3a8e282efa3fc7918 2024-12-09T01:51:30,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:30,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1007478fd9d0001, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:30,204 INFO [RS:0;ef6f18c58dc9:46367 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:51:30,204 INFO [RS:0;ef6f18c58dc9:46367 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,46367,1733709088002; zookeeper connection closed. 
2024-12-09T01:51:30,204 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@18fc5e78 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@18fc5e78 2024-12-09T01:51:30,211 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/info/2a18f01fec154ba3a8e282efa3fc7918, entries=10, sequenceid=11, filesize=6.5 K 2024-12-09T01:51:30,213 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/.tmp/ns/b5c07b32967d4393ab1761de98822ebe as hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/ns/b5c07b32967d4393ab1761de98822ebe 2024-12-09T01:51:30,222 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/ns/b5c07b32967d4393ab1761de98822ebe, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T01:51:30,223 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/.tmp/table/6ac82de4be3f484eb90115f411a08920 as hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/table/6ac82de4be3f484eb90115f411a08920 2024-12-09T01:51:30,232 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/table/6ac82de4be3f484eb90115f411a08920, entries=2, sequenceid=11, filesize=5.1 K 2024-12-09T01:51:30,234 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 148ms, sequenceid=11, compaction requested=false 2024-12-09T01:51:30,240 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T01:51:30,241 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:51:30,241 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:51:30,241 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709090086Running coprocessor pre-close hooks at 1733709090086Disabling compacts and flushes for region at 1733709090086Disabling writes for close at 1733709090086Obtaining lock to block concurrent updates at 
1733709090087 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733709090087Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733709090087Flushing stores of hbase:meta,,1.1588230740 at 1733709090088 (+1 ms)Flushing 1588230740/info: creating writer at 1733709090089 (+1 ms)Flushing 1588230740/info: appending metadata at 1733709090112 (+23 ms)Flushing 1588230740/info: closing flushed file at 1733709090112Flushing 1588230740/ns: creating writer at 1733709090129 (+17 ms)Flushing 1588230740/ns: appending metadata at 1733709090145 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733709090145Flushing 1588230740/table: creating writer at 1733709090163 (+18 ms)Flushing 1588230740/table: appending metadata at 1733709090180 (+17 ms)Flushing 1588230740/table: closing flushed file at 1733709090180Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76b39aad: reopening flushed file at 1733709090200 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4733a3c7: reopening flushed file at 1733709090212 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@632896f7: reopening flushed file at 1733709090222 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 148ms, sequenceid=11, compaction requested=false at 1733709090234 (+12 ms)Writing region close event to WAL at 1733709090235 (+1 ms)Running coprocessor post-close hooks at 1733709090240 (+5 ms)Closed at 1733709090241 (+1 ms) 2024-12-09T01:51:30,241 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T01:51:30,286 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,41039,1733709088070; all regions closed. 
2024-12-09T01:51:30,287 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,287 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,287 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,288 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,288 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741836_1012 (size=2751) 2024-12-09T01:51:30,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741836_1012 (size=2751) 2024-12-09T01:51:30,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741836_1012 (size=2751) 2024-12-09T01:51:30,298 DEBUG [RS:2;ef6f18c58dc9:41039 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/oldWALs 2024-12-09T01:51:30,298 INFO [RS:2;ef6f18c58dc9:41039 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C41039%2C1733709088070.meta:.meta(num 1733709088933) 2024-12-09T01:51:30,302 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,302 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,303 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,303 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,303 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741833_1009 (size=1298) 2024-12-09T01:51:30,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741833_1009 (size=1298) 2024-12-09T01:51:30,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741833_1009 (size=1298) 2024-12-09T01:51:30,310 DEBUG [RS:2;ef6f18c58dc9:41039 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/oldWALs 2024-12-09T01:51:30,310 INFO [RS:2;ef6f18c58dc9:41039 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C41039%2C1733709088070:(num 1733709088593) 2024-12-09T01:51:30,310 DEBUG [RS:2;ef6f18c58dc9:41039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:30,310 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:30,311 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:51:30,311 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T01:51:30,311 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:51:30,311 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T01:51:30,311 INFO [RS:2;ef6f18c58dc9:41039 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41039 2024-12-09T01:51:30,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:51:30,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,41039,1733709088070 2024-12-09T01:51:30,315 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:51:30,315 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,41039,1733709088070] 2024-12-09T01:51:30,317 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,41039,1733709088070 already deleted, retry=false 2024-12-09T01:51:30,317 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,41039,1733709088070 expired; onlineServers=1 2024-12-09T01:51:30,417 INFO [RS:2;ef6f18c58dc9:41039 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:51:30,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:30,417 INFO [RS:2;ef6f18c58dc9:41039 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,41039,1733709088070; zookeeper connection closed. 
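The NodeDeleted and NodeChildrenChanged events above come from the master's RegionServerTracker noticing that the stopping region server's ephemeral znode under /hbase/rs has gone away, after which that server is processed as expired. The same membership change can be observed with a plain ZooKeeper client; the sketch below is illustrative only and not part of the test, and the quorum address 127.0.0.1:63103 and the /hbase base znode are values from this particular run.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    // Illustrative sketch: list the live region-server ephemeral nodes under /hbase/rs and
    // leave a watch that fires on the next NodeChildrenChanged event, i.e. when a region
    // server registers or, as in the log above, its ephemeral node is deleted on shutdown.
    public class RsZnodeWatchSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:63103", 30_000, event -> { });
            List<String> servers = zk.getChildren("/hbase/rs",
                event -> System.out.println("children changed under " + event.getPath()));
            System.out.println("online region servers: " + servers);
            zk.close();
        }
    }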
2024-12-09T01:51:30,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41039-0x1007478fd9d0003, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:30,417 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@44856e14 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@44856e14 2024-12-09T01:51:30,455 INFO [regionserver/ef6f18c58dc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T01:51:30,455 INFO [regionserver/ef6f18c58dc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T01:51:30,499 DEBUG [RS:1;ef6f18c58dc9:41835 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/oldWALs 2024-12-09T01:51:30,499 INFO [RS:1;ef6f18c58dc9:41835 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C41835%2C1733709088034:(num 1733709088605) 2024-12-09T01:51:30,499 DEBUG [RS:1;ef6f18c58dc9:41835 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:30,499 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:30,499 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:51:30,499 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T01:51:30,499 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T01:51:30,499 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T01:51:30,499 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T01:51:30,499 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T01:51:30,499 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:51:30,499 INFO [RS:1;ef6f18c58dc9:41835 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41835 2024-12-09T01:51:30,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,41835,1733709088034 2024-12-09T01:51:30,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:51:30,502 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:51:30,503 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,41835,1733709088034] 2024-12-09T01:51:30,505 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,41835,1733709088034 already deleted, retry=false 2024-12-09T01:51:30,505 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,41835,1733709088034 expired; onlineServers=0 2024-12-09T01:51:30,505 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef6f18c58dc9,33921,1733709087948' ***** 2024-12-09T01:51:30,505 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T01:51:30,505 INFO [M:0;ef6f18c58dc9:33921 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:51:30,505 INFO [M:0;ef6f18c58dc9:33921 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:51:30,505 DEBUG [M:0;ef6f18c58dc9:33921 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T01:51:30,505 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T01:51:30,505 DEBUG [M:0;ef6f18c58dc9:33921 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T01:51:30,505 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709088311 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709088311,5,FailOnTimeoutGroup] 2024-12-09T01:51:30,505 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709088311 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709088311,5,FailOnTimeoutGroup] 2024-12-09T01:51:30,506 INFO [M:0;ef6f18c58dc9:33921 {}] hbase.ChoreService(370): Chore service for: master/ef6f18c58dc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T01:51:30,506 INFO [M:0;ef6f18c58dc9:33921 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:51:30,506 DEBUG [M:0;ef6f18c58dc9:33921 {}] master.HMaster(1795): Stopping service threads 2024-12-09T01:51:30,506 INFO [M:0;ef6f18c58dc9:33921 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T01:51:30,506 INFO [M:0;ef6f18c58dc9:33921 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:51:30,506 INFO [M:0;ef6f18c58dc9:33921 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T01:51:30,506 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T01:51:30,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T01:51:30,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:30,507 DEBUG [M:0;ef6f18c58dc9:33921 {}] zookeeper.ZKUtil(347): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T01:51:30,507 WARN [M:0;ef6f18c58dc9:33921 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T01:51:30,508 INFO [M:0;ef6f18c58dc9:33921 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/.lastflushedseqids 2024-12-09T01:51:30,510 WARN [IPC Server handler 1 on default port 45333 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T01:51:30,510 WARN [IPC Server handler 1 on default port 45333 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], 
creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T01:51:30,511 WARN [IPC Server handler 1 on default port 45333 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T01:51:30,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741843_1019 (size=127) 2024-12-09T01:51:30,517 INFO [M:0;ef6f18c58dc9:33921 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T01:51:30,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741843_1019 (size=127) 2024-12-09T01:51:30,518 INFO [M:0;ef6f18c58dc9:33921 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T01:51:30,518 DEBUG [M:0;ef6f18c58dc9:33921 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:51:30,518 INFO [M:0;ef6f18c58dc9:33921 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:30,518 DEBUG [M:0;ef6f18c58dc9:33921 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:30,518 DEBUG [M:0;ef6f18c58dc9:33921 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:51:30,518 DEBUG [M:0;ef6f18c58dc9:33921 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
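The "Failed to place enough replicas" warnings above mean the NameNode could only find two of the three requested replica targets for this late write (only two addStoredBlock confirmations follow for blk_1073741843_1019). The warning itself suggests enabling DEBUG on BlockPlacementPolicy and NetworkTopology to see the per-datanode reasons; below is a minimal sketch of doing that programmatically with the Log4j 2 Configurator already on this test's classpath. Equivalent logger entries in the test's log4j2 properties would work just as well; the wrapper class here is illustrative glue, not part of the test.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    // Minimal sketch: raise the two loggers named in the warning above to DEBUG so the
    // next run logs why each datanode was excluded from block placement.
    public final class EnableBlockPlacementDebugSketch {
        public static void enable() {
            Configurator.setLevel(
                "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
            Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
        }
    }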
2024-12-09T01:51:30,518 INFO [M:0;ef6f18c58dc9:33921 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-09T01:51:30,536 DEBUG [M:0;ef6f18c58dc9:33921 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7621cc96960a4134b68c070d2074162b is 82, key is hbase:meta,,1/info:regioninfo/1733709088975/Put/seqid=0 2024-12-09T01:51:30,538 WARN [IPC Server handler 2 on default port 45333 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T01:51:30,538 WARN [IPC Server handler 2 on default port 45333 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T01:51:30,538 WARN [IPC Server handler 2 on default port 45333 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T01:51:30,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741844_1020 (size=5672) 2024-12-09T01:51:30,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741844_1020 (size=5672) 2024-12-09T01:51:30,544 INFO [M:0;ef6f18c58dc9:33921 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7621cc96960a4134b68c070d2074162b 2024-12-09T01:51:30,568 DEBUG [M:0;ef6f18c58dc9:33921 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34a74cb84c0c46189fee05af32a301d8 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733709089508/Put/seqid=0 2024-12-09T01:51:30,570 WARN [IPC Server handler 0 on default port 45333 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and 
org.apache.hadoop.net.NetworkTopology 2024-12-09T01:51:30,570 WARN [IPC Server handler 0 on default port 45333 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T01:51:30,570 WARN [IPC Server handler 0 on default port 45333 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T01:51:30,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741845_1021 (size=6440) 2024-12-09T01:51:30,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741845_1021 (size=6440) 2024-12-09T01:51:30,577 INFO [M:0;ef6f18c58dc9:33921 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34a74cb84c0c46189fee05af32a301d8 2024-12-09T01:51:30,600 DEBUG [M:0;ef6f18c58dc9:33921 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3f5220ecdf534802af6794ff9d75bd8a is 69, key is ef6f18c58dc9,41039,1733709088070/rs:state/1733709088414/Put/seqid=0 2024-12-09T01:51:30,601 WARN [IPC Server handler 3 on default port 45333 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T01:51:30,601 WARN [IPC Server handler 3 on default port 45333 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T01:51:30,601 WARN [IPC Server handler 3 on default port 45333 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T01:51:30,604 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:30,604 INFO [RS:1;ef6f18c58dc9:41835 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:51:30,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41835-0x1007478fd9d0002, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:30,604 INFO [RS:1;ef6f18c58dc9:41835 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,41835,1733709088034; zookeeper connection closed. 2024-12-09T01:51:30,604 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3cacf0fe {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3cacf0fe 2024-12-09T01:51:30,604 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T01:51:30,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741846_1022 (size=5294) 2024-12-09T01:51:30,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741846_1022 (size=5294) 2024-12-09T01:51:30,607 INFO [M:0;ef6f18c58dc9:33921 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3f5220ecdf534802af6794ff9d75bd8a 2024-12-09T01:51:30,615 DEBUG [M:0;ef6f18c58dc9:33921 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7621cc96960a4134b68c070d2074162b as hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7621cc96960a4134b68c070d2074162b 2024-12-09T01:51:30,623 INFO [M:0;ef6f18c58dc9:33921 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7621cc96960a4134b68c070d2074162b, entries=8, sequenceid=72, filesize=5.5 K 2024-12-09T01:51:30,624 DEBUG [M:0;ef6f18c58dc9:33921 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34a74cb84c0c46189fee05af32a301d8 as hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/34a74cb84c0c46189fee05af32a301d8 2024-12-09T01:51:30,632 INFO [M:0;ef6f18c58dc9:33921 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/34a74cb84c0c46189fee05af32a301d8, entries=8, sequenceid=72, filesize=6.3 K 2024-12-09T01:51:30,633 DEBUG [M:0;ef6f18c58dc9:33921 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3f5220ecdf534802af6794ff9d75bd8a as hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3f5220ecdf534802af6794ff9d75bd8a 2024-12-09T01:51:30,640 INFO [M:0;ef6f18c58dc9:33921 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45333/user/jenkins/test-data/8cc46a52-16d6-1efc-b9f5-b9a44f32340c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3f5220ecdf534802af6794ff9d75bd8a, entries=3, sequenceid=72, filesize=5.2 K 2024-12-09T01:51:30,642 INFO [M:0;ef6f18c58dc9:33921 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=72, compaction requested=false 2024-12-09T01:51:30,643 INFO [M:0;ef6f18c58dc9:33921 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:30,643 DEBUG [M:0;ef6f18c58dc9:33921 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709090518Disabling compacts and flushes for region at 1733709090518Disabling writes for close at 1733709090518Obtaining lock to block concurrent updates at 1733709090518Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733709090518Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733709090519 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733709090520 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733709090520Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733709090536 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733709090536Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733709090551 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733709090568 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733709090568Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733709090583 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733709090599 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733709090599Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4588a90f: reopening flushed file at 1733709090614 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ba4d2c5: reopening flushed file at 1733709090623 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5df2fc01: reopening flushed file at 1733709090632 (+9 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=72, compaction requested=false at 1733709090642 (+10 ms)Writing region close event to WAL at 1733709090643 (+1 ms)Closed at 1733709090643 2024-12-09T01:51:30,644 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,645 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,645 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,645 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,645 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:30,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42527 is added to blk_1073741830_1006 (size=32683) 2024-12-09T01:51:30,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37693 is added to blk_1073741830_1006 (size=32683) 2024-12-09T01:51:30,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741830_1006 (size=32683) 2024-12-09T01:51:30,649 INFO [M:0;ef6f18c58dc9:33921 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T01:51:30,649 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T01:51:30,649 INFO [M:0;ef6f18c58dc9:33921 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33921 2024-12-09T01:51:30,650 INFO [M:0;ef6f18c58dc9:33921 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:51:30,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:30,752 INFO [M:0;ef6f18c58dc9:33921 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:51:30,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33921-0x1007478fd9d0000, quorum=127.0.0.1:63103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:30,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ef8f7bc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:30,755 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@12529ca{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:51:30,755 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:51:30,756 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b15d8c9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:51:30,756 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51a41d0b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/hadoop.log.dir/,STOPPED} 2024-12-09T01:51:30,757 WARN [BP-403251070-172.17.0.2-1733709086950 heartbeating to localhost/127.0.0.1:45333 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:51:30,757 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:51:30,757 WARN [BP-403251070-172.17.0.2-1733709086950 heartbeating to localhost/127.0.0.1:45333 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-403251070-172.17.0.2-1733709086950 (Datanode Uuid 274dce51-4584-441b-a07d-f3c78c826bcc) service to localhost/127.0.0.1:45333
2024-12-09T01:51:30,757 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T01:51:30,758 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/data/data5/current/BP-403251070-172.17.0.2-1733709086950 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T01:51:30,758 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/data/data6/current/BP-403251070-172.17.0.2-1733709086950 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T01:51:30,758 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T01:51:30,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f06ea44{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T01:51:30,761 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b030921{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T01:51:30,761 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T01:51:30,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c077530{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T01:51:30,762 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aeebd66{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/hadoop.log.dir/,STOPPED}
2024-12-09T01:51:30,763 WARN [BP-403251070-172.17.0.2-1733709086950 heartbeating to localhost/127.0.0.1:45333 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T01:51:30,763 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T01:51:30,763 WARN [BP-403251070-172.17.0.2-1733709086950 heartbeating to localhost/127.0.0.1:45333 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-403251070-172.17.0.2-1733709086950 (Datanode Uuid 3d30c5e5-d547-4dcb-af4c-48945ec8e8f6) service to localhost/127.0.0.1:45333
2024-12-09T01:51:30,763 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T01:51:30,764 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/data/data3/current/BP-403251070-172.17.0.2-1733709086950 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T01:51:30,764 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/data/data4/current/BP-403251070-172.17.0.2-1733709086950 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T01:51:30,764 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T01:51:30,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@71005823{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T01:51:30,767 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10d3673d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T01:51:30,767 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T01:51:30,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dae6551{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T01:51:30,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5657ac77{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/hadoop.log.dir/,STOPPED}
2024-12-09T01:51:30,769 WARN [BP-403251070-172.17.0.2-1733709086950 heartbeating to localhost/127.0.0.1:45333 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T01:51:30,769 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T01:51:30,769 WARN [BP-403251070-172.17.0.2-1733709086950 heartbeating to localhost/127.0.0.1:45333 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-403251070-172.17.0.2-1733709086950 (Datanode Uuid f188dbd1-73f4-4e6d-a520-8eac83fceca8) service to localhost/127.0.0.1:45333
2024-12-09T01:51:30,769 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T01:51:30,770 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/data/data1/current/BP-403251070-172.17.0.2-1733709086950 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T01:51:30,770 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/cluster_fcc1ab3e-2194-8be5-a30b-b316844c467b/data/data2/current/BP-403251070-172.17.0.2-1733709086950 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T01:51:30,770 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T01:51:30,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5265f235{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T01:51:30,778 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@206988bd{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T01:51:30,778 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T01:51:30,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58cabbb9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T01:51:30,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39a1ffce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/082ca5d0-a058-59eb-cf86-9d46374d7e98/hadoop.log.dir/,STOPPED}
2024-12-09T01:51:30,785 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T01:51:30,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T01:51:30,818 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=145 (was 88) - Thread LEAK? -, OpenFileDescriptor=519 (was 441) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=246 (was 259), ProcessCount=11 (was 11), AvailableMemoryMB=11598 (was 11762)
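Note: the final ResourceChecker line reports per-test resource deltas, comparing "after" counts (threads, open file descriptors, load, memory) against the "was" values captured before the test. The following is a minimal, hypothetical Java sketch of that before/after bookkeeping, not HBase's actual org.apache.hadoop.hbase.ResourceChecker; the class name ResourceDeltaSketch and the use of Thread.activeCount() as the sampled metric are illustrative assumptions.

    // Illustrative sketch only (assumed names), not the HBase ResourceChecker implementation:
    // sample a resource count before the test, compare it afterwards, and report a suspected
    // leak in the same style as the log line above.
    public class ResourceDeltaSketch {
        public static void main(String[] args) {
            int before = Thread.activeCount();   // e.g. 88 in the log above
            // ... the test body would run here ...
            int after = Thread.activeCount();    // e.g. 145 in the log above
            if (after > before) {
                System.out.printf("Thread=%d (was %d) - Thread LEAK? -%n", after, before);
            }
        }
    }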