2024-12-09 00:46:01,389 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-09 00:46:01,402 main DEBUG Took 0.010814 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 00:46:01,402 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 00:46:01,403 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 00:46:01,404 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 00:46:01,405 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 00:46:01,420 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 00:46:01,435 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,436 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 00:46:01,437 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,437 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 00:46:01,438 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,438 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 00:46:01,439 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,440 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 00:46:01,440 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,441 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 00:46:01,442 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,442 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 00:46:01,442 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,443 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-09 00:46:01,443 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,444 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 00:46:01,444 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,445 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 00:46:01,445 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,446 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 00:46:01,446 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,447 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 00:46:01,447 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,447 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 00:46:01,448 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,448 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 00:46:01,450 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 00:46:01,452 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 00:46:01,453 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 00:46:01,454 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-09 00:46:01,456 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-09 00:46:01,456 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-09 00:46:01,465 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-09 00:46:01,468 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-09 00:46:01,470 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-09 00:46:01,470 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-09 00:46:01,471 main DEBUG createAppenders(={Console}) 2024-12-09 00:46:01,472 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-09 00:46:01,472 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-09 00:46:01,473 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-09 00:46:01,473 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-09 00:46:01,474 main DEBUG OutputStream closed 2024-12-09 00:46:01,474 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-09 00:46:01,475 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-09 00:46:01,475 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-09 00:46:01,541 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-09 00:46:01,543 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-09 00:46:01,544 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-09 00:46:01,545 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-09 00:46:01,545 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-09 00:46:01,546 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-09 00:46:01,546 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-09 00:46:01,546 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-09 00:46:01,546 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-09 00:46:01,547 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-09 00:46:01,547 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-09 00:46:01,547 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-09 00:46:01,547 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-09 00:46:01,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-09 00:46:01,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-09 00:46:01,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-09 00:46:01,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-09 00:46:01,549 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-09 00:46:01,551 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09 00:46:01,551 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-09 00:46:01,551 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-09 00:46:01,552 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-09T00:46:01,565 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-09 00:46:01,567 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-09 00:46:01,568 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
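The DEBUG lines above show Log4j 2 assembling its test configuration from the log4j2.properties bundled in hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar: per-package LoggerConfigs (for example org.apache.zookeeper at ERROR, org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG), a root logger at INFO routed to the Console appender, and HBase's own HBaseTestAppender plugin writing to SYSTEM_ERR with an ISO8601 pattern layout. As a rough, hedged sketch only (the real file is not reproduced in this log, and the key names are the standard Log4j 2 properties conventions, not copied from the repository), a fragment along these lines would drive the same plugin builders:

# Sketch reconstructed from the DEBUG output above; illustrative, not the actual file.
appender.console.type = HBaseTestAppender
appender.console.name = Console
appender.console.target = SYSTEM_ERR
appender.console.maxSize = 1G
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN
logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG
logger.directory.name = org.apache.directory
logger.directory.level = WARN
logger.directory.additivity = false

rootLogger = INFO,Console

The remaining loggers listed in createLoggers(...) above would follow the same pattern, with the levels shown in their respective LoggerConfig$Builder lines.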
2024-12-09T00:46:01,768 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf 2024-12-09T00:46:01,796 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001, deleteOnExit=true 2024-12-09T00:46:01,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/test.cache.data in system properties and HBase conf 2024-12-09T00:46:01,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T00:46:01,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/hadoop.log.dir in system properties and HBase conf 2024-12-09T00:46:01,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T00:46:01,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T00:46:01,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T00:46:01,876 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-09T00:46:01,954 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T00:46:01,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T00:46:01,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T00:46:01,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T00:46:01,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T00:46:01,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T00:46:01,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T00:46:01,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T00:46:01,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T00:46:01,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T00:46:01,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/nfs.dump.dir in system properties and HBase conf 2024-12-09T00:46:01,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/java.io.tmpdir in system properties and HBase conf 2024-12-09T00:46:01,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T00:46:01,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T00:46:01,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T00:46:02,904 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T00:46:02,968 INFO [Time-limited test {}] log.Log(170): Logging initialized @2161ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T00:46:03,031 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T00:46:03,084 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T00:46:03,103 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T00:46:03,103 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T00:46:03,104 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T00:46:03,115 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T00:46:03,118 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b03c34d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/hadoop.log.dir/,AVAILABLE} 2024-12-09T00:46:03,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3cbd6fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T00:46:03,282 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cb83937{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/java.io.tmpdir/jetty-localhost-44617-hadoop-hdfs-3_4_1-tests_jar-_-any-5741282030154658117/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T00:46:03,291 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69b5b273{HTTP/1.1, (http/1.1)}{localhost:44617} 2024-12-09T00:46:03,291 INFO [Time-limited test {}] server.Server(415): Started @2485ms 2024-12-09T00:46:03,759 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T00:46:03,766 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T00:46:03,767 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T00:46:03,768 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T00:46:03,768 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T00:46:03,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@179ed6d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/hadoop.log.dir/,AVAILABLE} 2024-12-09T00:46:03,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f02cc61{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T00:46:03,864 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3990ff75{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/java.io.tmpdir/jetty-localhost-34897-hadoop-hdfs-3_4_1-tests_jar-_-any-6982873883750064915/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T00:46:03,865 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@48731e1b{HTTP/1.1, (http/1.1)}{localhost:34897} 2024-12-09T00:46:03,865 INFO [Time-limited test {}] server.Server(415): Started @3059ms 2024-12-09T00:46:03,910 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T00:46:04,009 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T00:46:04,013 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T00:46:04,014 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T00:46:04,015 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T00:46:04,015 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T00:46:04,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50510811{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/hadoop.log.dir/,AVAILABLE} 2024-12-09T00:46:04,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@125705fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T00:46:04,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@133f1bad{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/java.io.tmpdir/jetty-localhost-43219-hadoop-hdfs-3_4_1-tests_jar-_-any-3556944027661423480/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T00:46:04,112 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39860596{HTTP/1.1, (http/1.1)}{localhost:43219} 2024-12-09T00:46:04,112 INFO [Time-limited test {}] server.Server(415): Started @3306ms 2024-12-09T00:46:04,114 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T00:46:04,148 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T00:46:04,152 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T00:46:04,153 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T00:46:04,153 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T00:46:04,153 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T00:46:04,154 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28d0ee11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/hadoop.log.dir/,AVAILABLE} 2024-12-09T00:46:04,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14c1b227{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T00:46:04,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@17f8e572{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/java.io.tmpdir/jetty-localhost-35603-hadoop-hdfs-3_4_1-tests_jar-_-any-1575531091382382049/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T00:46:04,248 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11aaae40{HTTP/1.1, (http/1.1)}{localhost:35603} 2024-12-09T00:46:04,248 INFO [Time-limited test {}] server.Server(415): Started @3442ms 2024-12-09T00:46:04,249 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T00:46:05,062 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/data/data3/current/BP-542836953-172.17.0.2-1733705162438/current, will proceed with Du for space computation calculation, 2024-12-09T00:46:05,062 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/data/data2/current/BP-542836953-172.17.0.2-1733705162438/current, will proceed with Du for space computation calculation, 2024-12-09T00:46:05,062 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/data/data4/current/BP-542836953-172.17.0.2-1733705162438/current, will proceed with Du for space computation calculation, 2024-12-09T00:46:05,062 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/data/data1/current/BP-542836953-172.17.0.2-1733705162438/current, will proceed with Du for space computation calculation, 2024-12-09T00:46:05,089 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T00:46:05,089 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T00:46:05,133 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4265c4b3cb870c93 with lease ID 0xb966a17a131b743f: Processing first storage report for DS-53fe86d1-a3b2-464e-8a83-720a8abf669a from datanode DatanodeRegistration(127.0.0.1:36967, datanodeUuid=5161c013-0cec-4da8-b2fa-50669b93914d, infoPort=38503, infoSecurePort=0, ipcPort=39757, storageInfo=lv=-57;cid=testClusterID;nsid=329781139;c=1733705162439) 2024-12-09T00:46:05,135 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4265c4b3cb870c93 with lease ID 0xb966a17a131b743f: from storage DS-53fe86d1-a3b2-464e-8a83-720a8abf669a node DatanodeRegistration(127.0.0.1:36967, datanodeUuid=5161c013-0cec-4da8-b2fa-50669b93914d, infoPort=38503, infoSecurePort=0, ipcPort=39757, storageInfo=lv=-57;cid=testClusterID;nsid=329781139;c=1733705162439), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-09T00:46:05,135 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd40883072e7881fa with lease ID 0xb966a17a131b743e: Processing first storage report for DS-4889d365-bc81-4b9e-bac9-b22158b815bd from datanode DatanodeRegistration(127.0.0.1:36209, datanodeUuid=68577e05-384d-4a9c-811c-e6eb1d65658e, infoPort=36505, infoSecurePort=0, ipcPort=33581, storageInfo=lv=-57;cid=testClusterID;nsid=329781139;c=1733705162439) 2024-12-09T00:46:05,135 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd40883072e7881fa with lease ID 0xb966a17a131b743e: from storage DS-4889d365-bc81-4b9e-bac9-b22158b815bd node DatanodeRegistration(127.0.0.1:36209, datanodeUuid=68577e05-384d-4a9c-811c-e6eb1d65658e, infoPort=36505, infoSecurePort=0, ipcPort=33581, storageInfo=lv=-57;cid=testClusterID;nsid=329781139;c=1733705162439), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T00:46:05,136 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4265c4b3cb870c93 with lease ID 0xb966a17a131b743f: Processing first storage report for DS-bb806213-4ce3-4548-9e92-b683a22cc7a8 from datanode DatanodeRegistration(127.0.0.1:36967, datanodeUuid=5161c013-0cec-4da8-b2fa-50669b93914d, infoPort=38503, infoSecurePort=0, ipcPort=39757, storageInfo=lv=-57;cid=testClusterID;nsid=329781139;c=1733705162439) 2024-12-09T00:46:05,136 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4265c4b3cb870c93 with lease ID 0xb966a17a131b743f: from storage DS-bb806213-4ce3-4548-9e92-b683a22cc7a8 node DatanodeRegistration(127.0.0.1:36967, datanodeUuid=5161c013-0cec-4da8-b2fa-50669b93914d, infoPort=38503, infoSecurePort=0, ipcPort=39757, storageInfo=lv=-57;cid=testClusterID;nsid=329781139;c=1733705162439), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T00:46:05,136 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd40883072e7881fa with lease ID 0xb966a17a131b743e: Processing first storage report for DS-9058ce67-99fb-4286-87d7-7273a9840ae1 from datanode DatanodeRegistration(127.0.0.1:36209, datanodeUuid=68577e05-384d-4a9c-811c-e6eb1d65658e, infoPort=36505, infoSecurePort=0, ipcPort=33581, storageInfo=lv=-57;cid=testClusterID;nsid=329781139;c=1733705162439) 2024-12-09T00:46:05,136 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xd40883072e7881fa with lease ID 0xb966a17a131b743e: from storage DS-9058ce67-99fb-4286-87d7-7273a9840ae1 node DatanodeRegistration(127.0.0.1:36209, datanodeUuid=68577e05-384d-4a9c-811c-e6eb1d65658e, infoPort=36505, infoSecurePort=0, ipcPort=33581, storageInfo=lv=-57;cid=testClusterID;nsid=329781139;c=1733705162439), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T00:46:05,161 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/data/data5/current/BP-542836953-172.17.0.2-1733705162438/current, will proceed with Du for space computation calculation, 2024-12-09T00:46:05,161 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/data/data6/current/BP-542836953-172.17.0.2-1733705162438/current, will proceed with Du for space computation calculation, 2024-12-09T00:46:05,183 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T00:46:05,187 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7784806e19a99960 with lease ID 0xb966a17a131b7440: Processing first storage report for DS-24d373eb-b69d-4238-bc8a-61d991e9676c from datanode DatanodeRegistration(127.0.0.1:39493, datanodeUuid=1c0c990f-5ffc-4089-90ab-61a1a5e1269a, infoPort=44287, infoSecurePort=0, ipcPort=42217, storageInfo=lv=-57;cid=testClusterID;nsid=329781139;c=1733705162439) 2024-12-09T00:46:05,187 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7784806e19a99960 with lease ID 0xb966a17a131b7440: from storage DS-24d373eb-b69d-4238-bc8a-61d991e9676c node DatanodeRegistration(127.0.0.1:39493, datanodeUuid=1c0c990f-5ffc-4089-90ab-61a1a5e1269a, infoPort=44287, infoSecurePort=0, ipcPort=42217, storageInfo=lv=-57;cid=testClusterID;nsid=329781139;c=1733705162439), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T00:46:05,188 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7784806e19a99960 with lease ID 0xb966a17a131b7440: Processing first storage report for DS-1f1931fc-6977-4d96-bd3e-ee844e23b3d6 from datanode DatanodeRegistration(127.0.0.1:39493, datanodeUuid=1c0c990f-5ffc-4089-90ab-61a1a5e1269a, infoPort=44287, infoSecurePort=0, ipcPort=42217, storageInfo=lv=-57;cid=testClusterID;nsid=329781139;c=1733705162439) 2024-12-09T00:46:05,188 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7784806e19a99960 with lease ID 0xb966a17a131b7440: from storage DS-1f1931fc-6977-4d96-bd3e-ee844e23b3d6 node DatanodeRegistration(127.0.0.1:39493, datanodeUuid=1c0c990f-5ffc-4089-90ab-61a1a5e1269a, infoPort=44287, infoSecurePort=0, ipcPort=42217, storageInfo=lv=-57;cid=testClusterID;nsid=329781139;c=1733705162439), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T00:46:05,221 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf 2024-12-09T00:46:05,292 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable 2024-12-09T00:46:05,337 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=157, OpenFileDescriptor=393, MaxFileDescriptor=1048576, SystemLoadAverage=139, ProcessCount=11, AvailableMemoryMB=16526 2024-12-09T00:46:05,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T00:46:05,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-09T00:46:05,414 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/zookeeper_0, clientPort=51763, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T00:46:05,423 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51763 2024-12-09T00:46:05,432 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:05,435 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:05,544 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:05,545 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T00:46:05,589 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006682308_22 at /127.0.0.1:58432 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:36209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58432 dst: /127.0.0.1:36209 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T00:46:05,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-09T00:46:06,010 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
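The DataXceiver error and the "Cannot allocate parity block" warnings above are consistent with enabling the RS-3-2-1024k erasure coding policy on a mini cluster that has only three live datanodes at this point: each striped block group needs its 3 data units plus 2 parity units on distinct datanodes, so the parity units at indices 3 and 4 cannot be placed and the group is written short ("failed to write 2 blocks"). A minimal Java sketch of that arithmetic follows; the class and variable names are hypothetical and not part of the test code.

// Hedged illustration only: why RS-3-2-1024k cannot place its parity blocks
// on a 3-datanode mini cluster. A striped block group needs one distinct
// datanode per internal block (data + parity).
public class EcStripeCapacityCheck {
    public static void main(String[] args) {
        int dataUnits = 3;       // the "3" in RS-3-2
        int parityUnits = 2;     // the "2" in RS-3-2
        int liveDataNodes = 3;   // datanodes started by this mini cluster
        int stripeWidth = dataUnits + parityUnits;            // 5 distinct nodes needed
        int unplaceable = Math.max(0, stripeWidth - liveDataNodes);
        System.out.printf("stripe width=%d, live datanodes=%d, unplaceable blocks=%d%n",
                stripeWidth, liveDataNodes, unplaceable);
        // Prints: stripe width=5, live datanodes=3, unplaceable blocks=2
        // matching the warnings for parity block index=3 and index=4 above and the
        // later "Block group <1> failed to write 2 blocks" message.
    }
}

The log's own suggestion, running hdfs ec -verifyClusterSetup, performs essentially the same check cluster-wide against all enabled erasure coding policies.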
2024-12-09T00:46:06,020 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3 with version=8 2024-12-09T00:46:06,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/hbase-staging 2024-12-09T00:46:06,101 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-09T00:46:06,336 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1bae0942de96:0 server-side Connection retries=45 2024-12-09T00:46:06,344 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:06,345 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:06,349 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T00:46:06,349 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:06,350 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T00:46:06,461 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T00:46:06,508 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T00:46:06,515 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T00:46:06,518 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T00:46:06,538 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 19765 (auto-detected) 2024-12-09T00:46:06,539 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-09T00:46:06,554 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46663 2024-12-09T00:46:06,572 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46663 connecting to ZooKeeper ensemble=127.0.0.1:51763 2024-12-09T00:46:06,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:466630x0, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T00:46:06,673 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46663-0x100081728f20000 connected 2024-12-09T00:46:06,749 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:06,753 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:06,766 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:06,770 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3, hbase.cluster.distributed=false 2024-12-09T00:46:06,791 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T00:46:06,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46663 2024-12-09T00:46:06,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46663 2024-12-09T00:46:06,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46663 2024-12-09T00:46:06,797 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46663 2024-12-09T00:46:06,797 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46663 2024-12-09T00:46:06,883 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1bae0942de96:0 server-side Connection retries=45 2024-12-09T00:46:06,884 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:06,884 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:06,884 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T00:46:06,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:06,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T00:46:06,887 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T00:46:06,889 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T00:46:06,889 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43359 2024-12-09T00:46:06,891 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43359 connecting to ZooKeeper ensemble=127.0.0.1:51763 2024-12-09T00:46:06,892 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:06,895 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:06,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:433590x0, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T00:46:06,915 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:433590x0, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:06,915 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43359-0x100081728f20001 connected 2024-12-09T00:46:06,919 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T00:46:06,925 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T00:46:06,928 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T00:46:06,933 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T00:46:06,934 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43359 2024-12-09T00:46:06,934 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43359 2024-12-09T00:46:06,934 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43359 2024-12-09T00:46:06,938 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43359 2024-12-09T00:46:06,939 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43359 2024-12-09T00:46:06,954 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1bae0942de96:0 server-side Connection retries=45 2024-12-09T00:46:06,954 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:06,954 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:06,955 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T00:46:06,955 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:06,955 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T00:46:06,955 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T00:46:06,956 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T00:46:06,956 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43375 2024-12-09T00:46:06,958 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43375 connecting to ZooKeeper ensemble=127.0.0.1:51763 2024-12-09T00:46:06,959 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:06,962 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:06,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:433750x0, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T00:46:06,990 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:06,990 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43375-0x100081728f20002 connected 2024-12-09T00:46:06,991 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T00:46:06,992 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T00:46:06,993 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T00:46:06,995 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T00:46:06,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43375 2024-12-09T00:46:06,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43375 2024-12-09T00:46:06,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43375 2024-12-09T00:46:06,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43375 2024-12-09T00:46:06,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43375 2024-12-09T00:46:07,014 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1bae0942de96:0 server-side Connection retries=45 2024-12-09T00:46:07,014 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:07,014 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:07,015 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T00:46:07,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:07,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T00:46:07,015 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T00:46:07,015 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T00:46:07,016 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38143 2024-12-09T00:46:07,017 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38143 connecting to ZooKeeper ensemble=127.0.0.1:51763 2024-12-09T00:46:07,018 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:07,020 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:07,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:381430x0, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T00:46:07,032 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:381430x0, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:07,032 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38143-0x100081728f20003 connected 2024-12-09T00:46:07,032 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T00:46:07,033 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T00:46:07,035 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T00:46:07,037 DEBUG [Time-limited 
test {}] zookeeper.ZKUtil(113): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T00:46:07,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38143 2024-12-09T00:46:07,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38143 2024-12-09T00:46:07,039 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38143 2024-12-09T00:46:07,040 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38143 2024-12-09T00:46:07,041 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38143 2024-12-09T00:46:07,057 DEBUG [M:0;1bae0942de96:46663 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1bae0942de96:46663 2024-12-09T00:46:07,058 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1bae0942de96,46663,1733705166187 2024-12-09T00:46:07,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:07,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:07,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:07,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:07,075 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1bae0942de96,46663,1733705166187 2024-12-09T00:46:07,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T00:46:07,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T00:46:07,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T00:46:07,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,098 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T00:46:07,099 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1bae0942de96,46663,1733705166187 from backup master directory 2024-12-09T00:46:07,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1bae0942de96,46663,1733705166187 2024-12-09T00:46:07,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:07,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:07,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:07,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:07,107 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T00:46:07,107 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1bae0942de96,46663,1733705166187
2024-12-09T00:46:07,109 INFO [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-09T00:46:07,110 INFO [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-09T00:46:07,165 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/hbase.id] with ID: 277868f4-deed-43be-b211-03676c4bb2e6
2024-12-09T00:46:07,165 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/.tmp/hbase.id
2024-12-09T00:46:07,172 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T00:46:07,172 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T00:46:07,176 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006682308_22 at /127.0.0.1:58458 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:36209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58458 dst: /127.0.0.1:36209
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T00:46:07,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-09T00:46:07,182 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T00:46:07,182 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/.tmp/hbase.id]:[hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/hbase.id] 2024-12-09T00:46:07,224 INFO [master/1bae0942de96:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:07,228 INFO [master/1bae0942de96:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T00:46:07,244 INFO [master/1bae0942de96:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-09T00:46:07,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,278 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:07,278 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:07,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006682308_22 at /127.0.0.1:58466 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:36209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58466 dst: /127.0.0.1:36209 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T00:46:07,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-09T00:46:07,286 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T00:46:07,300 INFO [master/1bae0942de96:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T00:46:07,302 INFO [master/1bae0942de96:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T00:46:07,307 INFO [master/1bae0942de96:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T00:46:07,334 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T00:46:07,334 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T00:46:07,337 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006682308_22 at /127.0.0.1:35640 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35640 dst: /127.0.0.1:39493
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T00:46:07,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_-9223372036854775744_1008 (size=1189)
2024-12-09T00:46:07,342 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-09T00:46:07,357 INFO [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store 2024-12-09T00:46:07,373 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:07,373 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:07,375 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006682308_22 at /127.0.0.1:35652 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35652 dst: /127.0.0.1:39493 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T00:46:07,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-09T00:46:07,381 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T00:46:07,385 INFO [master/1bae0942de96:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-09T00:46:07,387 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T00:46:07,388 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T00:46:07,389 INFO [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T00:46:07,389 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T00:46:07,390 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T00:46:07,390 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T00:46:07,390 INFO [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T00:46:07,392 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733705167388Disabling compacts and flushes for region at 1733705167388Disabling writes for close at 1733705167390 (+2 ms)Writing region close event to WAL at 1733705167390Closed at 1733705167390
2024-12-09T00:46:07,394 WARN [master/1bae0942de96:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/.initializing
2024-12-09T00:46:07,394 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/WALs/1bae0942de96,46663,1733705166187
2024-12-09T00:46:07,402 INFO [master/1bae0942de96:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-12-09T00:46:07,414 INFO [master/1bae0942de96:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1bae0942de96%2C46663%2C1733705166187, suffix=, logDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/WALs/1bae0942de96,46663,1733705166187, archiveDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/oldWALs, maxLogs=10
2024-12-09T00:46:07,438 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/WALs/1bae0942de96,46663,1733705166187/1bae0942de96%2C46663%2C1733705166187.1733705167418, exclude list is [], retry=0
2024-12-09T00:46:07,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T00:46:07,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36209,DS-4889d365-bc81-4b9e-bac9-b22158b815bd,DISK]
2024-12-09T00:46:07,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36967,DS-53fe86d1-a3b2-464e-8a83-720a8abf669a,DISK]
2024-12-09T00:46:07,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39493,DS-24d373eb-b69d-4238-bc8a-61d991e9676c,DISK]
2024-12-09T00:46:07,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-12-09T00:46:07,496 INFO [master/1bae0942de96:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/WALs/1bae0942de96,46663,1733705166187/1bae0942de96%2C46663%2C1733705166187.1733705167418
2024-12-09T00:46:07,497 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44287:44287),(127.0.0.1/127.0.0.1:38503:38503),(127.0.0.1/127.0.0.1:36505:36505)]
2024-12-09T00:46:07,497 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-09T00:46:07,498 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T00:46:07,500 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T00:46:07,501 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T00:46:07,531 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T00:46:07,552 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T00:46:07,556 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:07,558 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:07,558 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:07,561 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T00:46:07,562 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:07,563 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T00:46:07,563 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:07,565 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T00:46:07,565 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:07,566 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T00:46:07,567 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:07,569 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T00:46:07,569 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:07,570 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T00:46:07,570 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:07,573 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:07,575 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:07,579 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:07,580 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:07,583 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T00:46:07,586 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:07,591 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T00:46:07,592 INFO [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61460227, jitterRate=-0.08417125046253204}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T00:46:07,598 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733705167512Initializing all the Stores at 1733705167514 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733705167514Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733705167515 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733705167515Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733705167515Cleaning up temporary data from old regions at 1733705167580 (+65 ms)Region opened successfully at 1733705167598 (+18 ms) 2024-12-09T00:46:07,599 INFO [master/1bae0942de96:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T00:46:07,632 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a3b2945, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1bae0942de96/172.17.0.2:0 2024-12-09T00:46:07,659 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T00:46:07,667 INFO [master/1bae0942de96:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T00:46:07,667 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T00:46:07,670 INFO [master/1bae0942de96:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T00:46:07,671 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T00:46:07,675 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-09T00:46:07,675 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T00:46:07,697 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T00:46:07,705 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T00:46:07,755 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T00:46:07,759 INFO [master/1bae0942de96:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T00:46:07,761 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T00:46:07,772 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T00:46:07,774 INFO [master/1bae0942de96:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T00:46:07,777 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T00:46:07,788 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T00:46:07,790 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T00:46:07,797 DEBUG [master/1bae0942de96:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T00:46:07,815 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T00:46:07,822 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T00:46:07,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:07,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:07,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:07,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:07,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,835 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1bae0942de96,46663,1733705166187, sessionid=0x100081728f20000, setting cluster-up flag (Was=false) 2024-12-09T00:46:07,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-09T00:46:07,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,889 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T00:46:07,893 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1bae0942de96,46663,1733705166187 2024-12-09T00:46:07,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:07,939 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T00:46:07,941 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1bae0942de96,46663,1733705166187 2024-12-09T00:46:07,949 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T00:46:08,014 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T00:46:08,023 INFO [master/1bae0942de96:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T00:46:08,029 INFO [master/1bae0942de96:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-09T00:46:08,033 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1bae0942de96,46663,1733705166187 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T00:46:08,040 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1bae0942de96:0, corePoolSize=5, maxPoolSize=5 2024-12-09T00:46:08,040 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1bae0942de96:0, corePoolSize=5, maxPoolSize=5 2024-12-09T00:46:08,041 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1bae0942de96:0, corePoolSize=5, maxPoolSize=5 2024-12-09T00:46:08,041 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1bae0942de96:0, corePoolSize=5, maxPoolSize=5 2024-12-09T00:46:08,041 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1bae0942de96:0, corePoolSize=10, maxPoolSize=10 2024-12-09T00:46:08,041 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,041 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1bae0942de96:0, corePoolSize=2, maxPoolSize=2 2024-12-09T00:46:08,041 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,043 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733705198043 2024-12-09T00:46:08,044 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T00:46:08,045 INFO [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(746): ClusterId : 277868f4-deed-43be-b211-03676c4bb2e6 2024-12-09T00:46:08,045 INFO [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(746): ClusterId : 277868f4-deed-43be-b211-03676c4bb2e6 2024-12-09T00:46:08,046 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T00:46:08,047 INFO [RS:2;1bae0942de96:38143 {}] regionserver.HRegionServer(746): ClusterId : 277868f4-deed-43be-b211-03676c4bb2e6 2024-12-09T00:46:08,048 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T00:46:08,048 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T00:46:08,048 DEBUG [RS:2;1bae0942de96:38143 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T00:46:08,048 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T00:46:08,048 DEBUG [RS:1;1bae0942de96:43375 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T00:46:08,049 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T00:46:08,050 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T00:46:08,050 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T00:46:08,050 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T00:46:08,051 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,054 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:08,054 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T00:46:08,054 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T00:46:08,055 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T00:46:08,055 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): 
Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T00:46:08,057 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T00:46:08,057 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T00:46:08,059 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1bae0942de96:0:becomeActiveMaster-HFileCleaner.large.0-1733705168058,5,FailOnTimeoutGroup] 2024-12-09T00:46:08,060 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1bae0942de96:0:becomeActiveMaster-HFileCleaner.small.0-1733705168059,5,FailOnTimeoutGroup] 2024-12-09T00:46:08,060 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,061 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T00:46:08,062 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,062 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,064 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:08,064 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T00:46:08,066 DEBUG [RS:1;1bae0942de96:43375 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-09T00:46:08,066 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-09T00:46:08,066 DEBUG [RS:2;1bae0942de96:38143 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-09T00:46:08,066 DEBUG [RS:1;1bae0942de96:43375 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-09T00:46:08,066 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-09T00:46:08,066 DEBUG [RS:2;1bae0942de96:38143 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-09T00:46:08,077 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006682308_22 at /127.0.0.1:35688 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35688 dst: /127.0.0.1:39493
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T00:46:08,082 DEBUG [RS:1;1bae0942de96:43375 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T00:46:08,082 DEBUG [RS:2;1bae0942de96:38143 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T00:46:08,082 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T00:46:08,082 DEBUG [RS:0;1bae0942de96:43359 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20a1f023, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1bae0942de96/172.17.0.2:0 2024-12-09T00:46:08,082 DEBUG [RS:2;1bae0942de96:38143 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5543e9c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1bae0942de96/172.17.0.2:0 2024-12-09T00:46:08,082 DEBUG [RS:1;1bae0942de96:43375 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a5edcb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1bae0942de96/172.17.0.2:0 2024-12-09T00:46:08,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-09T00:46:08,085 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T00:46:08,086 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T00:46:08,087 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3 2024-12-09T00:46:08,100 DEBUG [RS:0;1bae0942de96:43359 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1bae0942de96:43359 2024-12-09T00:46:08,100 DEBUG [RS:1;1bae0942de96:43375 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;1bae0942de96:43375 2024-12-09T00:46:08,102 DEBUG [RS:2;1bae0942de96:38143 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;1bae0942de96:38143 2024-12-09T00:46:08,103 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:08,103 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
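For reference, the hbase:meta 'info' family attributes logged above (VERSIONS '3', BLOOMFILTER 'ROWCOL', DATA_BLOCK_ENCODING 'ROW_INDEX_V1', IN_MEMORY 'true', BLOCKSIZE 8 KB) correspond roughly to the sketch below built with the public HBase client API. This is only an illustration of what the descriptor encodes, assuming a current HBase client on the classpath; it is not the code path FSTableDescriptors/InitMetaProcedure actually runs, and it omits the ns, rep_barrier and table families and the MultiRowMutationEndpoint coprocessor.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaDescriptorSketch {
  // Rebuilds only the 'info' family with the attributes reported in the log above;
  // the real hbase:meta descriptor carries three more families and a coprocessor.
  static TableDescriptor infoFamilySketch() {
    return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
            .build())
        .build();
  }
}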
2024-12-09T00:46:08,103 INFO [RS:2;1bae0942de96:38143 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-12-09T00:46:08,103 INFO [RS:1;1bae0942de96:43375 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-12-09T00:46:08,103 INFO [RS:0;1bae0942de96:43359 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-12-09T00:46:08,104 INFO [RS:2;1bae0942de96:38143 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-12-09T00:46:08,104 INFO [RS:0;1bae0942de96:43359 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-12-09T00:46:08,104 INFO [RS:1;1bae0942de96:43375 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-12-09T00:46:08,104 DEBUG [RS:2;1bae0942de96:38143 {}] regionserver.HRegionServer(832): About to register with Master.
2024-12-09T00:46:08,104 DEBUG [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(832): About to register with Master.
2024-12-09T00:46:08,104 DEBUG [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(832): About to register with Master.
2024-12-09T00:46:08,107 INFO [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(2659): reportForDuty to master=1bae0942de96,46663,1733705166187 with port=43359, startcode=1733705166853
2024-12-09T00:46:08,107 INFO [RS:2;1bae0942de96:38143 {}] regionserver.HRegionServer(2659): reportForDuty to master=1bae0942de96,46663,1733705166187 with port=38143, startcode=1733705167014
2024-12-09T00:46:08,107 INFO [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(2659): reportForDuty to master=1bae0942de96,46663,1733705166187 with port=43375, startcode=1733705166954
2024-12-09T00:46:08,120 DEBUG [RS:1;1bae0942de96:43375 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-09T00:46:08,120 DEBUG [RS:0;1bae0942de96:43359 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-09T00:46:08,120 DEBUG [RS:2;1bae0942de96:38143 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-09T00:46:08,130 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006682308_22 at /127.0.0.1:58498 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:36209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58498 dst: /127.0.0.1:36209
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T00:46:08,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_-9223372036854775696_1015 (size=32)
2024-12-09T00:46:08,143 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-09T00:46:08,144 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T00:46:08,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_-9223372036854775789_1002 (size=7)
2024-12-09T00:46:08,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_-9223372036854775788_1002 (size=7)
2024-12-09T00:46:08,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-09T00:46:08,156 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-09T00:46:08,157 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T00:46:08,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T00:46:08,159 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-09T00:46:08,162 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T00:46:08,162 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:08,163 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47405, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T00:46:08,163 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44285, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T00:46:08,163 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40315, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T00:46:08,164 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:08,164 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T00:46:08,167 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T00:46:08,167 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:08,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:08,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T00:46:08,169 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46663 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1bae0942de96,43359,1733705166853 2024-12-09T00:46:08,172 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46663 {}] master.ServerManager(517): Registering regionserver=1bae0942de96,43359,1733705166853 2024-12-09T00:46:08,173 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T00:46:08,173 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:08,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:08,174 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T00:46:08,176 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740 2024-12-09T00:46:08,177 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740 2024-12-09T00:46:08,180 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T00:46:08,180 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T00:46:08,181 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-09T00:46:08,184 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T00:46:08,185 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46663 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1bae0942de96,38143,1733705167014 2024-12-09T00:46:08,185 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46663 {}] master.ServerManager(517): Registering regionserver=1bae0942de96,38143,1733705167014 2024-12-09T00:46:08,189 DEBUG [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3 2024-12-09T00:46:08,189 DEBUG [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44353 2024-12-09T00:46:08,190 DEBUG [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T00:46:08,191 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46663 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1bae0942de96,43375,1733705166954 2024-12-09T00:46:08,191 DEBUG [RS:2;1bae0942de96:38143 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3 2024-12-09T00:46:08,191 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46663 {}] master.ServerManager(517): Registering regionserver=1bae0942de96,43375,1733705166954 2024-12-09T00:46:08,191 DEBUG [RS:2;1bae0942de96:38143 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44353 2024-12-09T00:46:08,192 DEBUG [RS:2;1bae0942de96:38143 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T00:46:08,195 DEBUG [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3 2024-12-09T00:46:08,195 DEBUG [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44353 2024-12-09T00:46:08,195 DEBUG [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T00:46:08,202 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T00:46:08,203 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59660138, jitterRate=-0.11099466681480408}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T00:46:08,205 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733705168145Initializing all the Stores at 1733705168147 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 
1733705168147Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733705168151 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733705168151Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733705168151Cleaning up temporary data from old regions at 1733705168180 (+29 ms)Region opened successfully at 1733705168205 (+25 ms) 2024-12-09T00:46:08,205 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T00:46:08,205 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T00:46:08,205 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T00:46:08,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T00:46:08,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T00:46:08,207 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T00:46:08,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733705168205Disabling compacts and flushes for region at 1733705168205Disabling writes for close at 1733705168206 (+1 ms)Writing region close event to WAL at 1733705168206Closed at 1733705168207 (+1 ms) 2024-12-09T00:46:08,210 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T00:46:08,210 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T00:46:08,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T00:46:08,223 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T00:46:08,226 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T00:46:08,233 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T00:46:08,247 DEBUG [RS:2;1bae0942de96:38143 {}] zookeeper.ZKUtil(111): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1bae0942de96,38143,1733705167014 2024-12-09T00:46:08,247 DEBUG [RS:0;1bae0942de96:43359 {}] zookeeper.ZKUtil(111): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1bae0942de96,43359,1733705166853 2024-12-09T00:46:08,247 WARN [RS:2;1bae0942de96:38143 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T00:46:08,247 WARN [RS:0;1bae0942de96:43359 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T00:46:08,248 INFO [RS:2;1bae0942de96:38143 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T00:46:08,248 INFO [RS:0;1bae0942de96:43359 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T00:46:08,248 DEBUG [RS:2;1bae0942de96:38143 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,38143,1733705167014 2024-12-09T00:46:08,248 DEBUG [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,43359,1733705166853 2024-12-09T00:46:08,248 DEBUG [RS:1;1bae0942de96:43375 {}] zookeeper.ZKUtil(111): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1bae0942de96,43375,1733705166954 2024-12-09T00:46:08,248 WARN [RS:1;1bae0942de96:43375 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T00:46:08,248 INFO [RS:1;1bae0942de96:43375 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T00:46:08,249 DEBUG [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,43375,1733705166954 2024-12-09T00:46:08,250 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1bae0942de96,43375,1733705166954] 2024-12-09T00:46:08,250 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1bae0942de96,38143,1733705167014] 2024-12-09T00:46:08,250 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1bae0942de96,43359,1733705166853] 2024-12-09T00:46:08,275 INFO [RS:0;1bae0942de96:43359 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T00:46:08,275 INFO [RS:1;1bae0942de96:43375 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T00:46:08,275 INFO [RS:2;1bae0942de96:38143 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T00:46:08,288 INFO [RS:1;1bae0942de96:43375 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T00:46:08,288 INFO [RS:0;1bae0942de96:43359 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T00:46:08,288 INFO [RS:2;1bae0942de96:38143 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T00:46:08,293 INFO [RS:1;1bae0942de96:43375 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T00:46:08,293 INFO [RS:0;1bae0942de96:43359 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T00:46:08,293 INFO [RS:2;1bae0942de96:38143 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T00:46:08,293 INFO [RS:2;1bae0942de96:38143 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,293 INFO [RS:0;1bae0942de96:43359 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,293 INFO [RS:1;1bae0942de96:43375 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
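The /hbase/rs entries that RegionServerTracker is picking up above are plain ephemeral ZooKeeper znodes, one per live regionserver. A minimal sketch for listing them with the stock ZooKeeper client, assuming the quorum 127.0.0.1:51763 from this run is still reachable (purely illustrative, not part of the test):

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ListRegionServerZnodes {
  public static void main(String[] args) throws Exception {
    // Quorum and base znode copied from the log above; adjust if the test ports change.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51763", 30000, event -> { });
    List<String> servers = zk.getChildren("/hbase/rs", false);
    // Entries look like 1bae0942de96,43359,1733705166853 (host,port,startcode).
    servers.forEach(System.out::println);
    zk.close();
  }
}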
2024-12-09T00:46:08,294 INFO [RS:2;1bae0942de96:38143 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T00:46:08,294 INFO [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T00:46:08,294 INFO [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T00:46:08,299 INFO [RS:2;1bae0942de96:38143 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T00:46:08,299 INFO [RS:1;1bae0942de96:43375 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T00:46:08,299 INFO [RS:0;1bae0942de96:43359 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T00:46:08,301 INFO [RS:1;1bae0942de96:43375 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,301 INFO [RS:0;1bae0942de96:43359 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,301 INFO [RS:2;1bae0942de96:38143 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,301 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG [RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG [RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG [RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG 
[RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG [RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,301 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1bae0942de96:0, corePoolSize=2, maxPoolSize=2 2024-12-09T00:46:08,302 DEBUG [RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1bae0942de96:0, corePoolSize=2, maxPoolSize=2 2024-12-09T00:46:08,302 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1bae0942de96:0, corePoolSize=2, maxPoolSize=2 2024-12-09T00:46:08,302 DEBUG [RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,302 DEBUG [RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1bae0942de96:0, corePoolSize=3, maxPoolSize=3 2024-12-09T00:46:08,302 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1bae0942de96:0, corePoolSize=3, maxPoolSize=3 2024-12-09T00:46:08,302 DEBUG [RS:0;1bae0942de96:43359 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0, corePoolSize=3, maxPoolSize=3 2024-12-09T00:46:08,302 DEBUG [RS:2;1bae0942de96:38143 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0, corePoolSize=3, maxPoolSize=3 2024-12-09T00:46:08,302 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,303 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:08,303 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1bae0942de96:0, corePoolSize=3, maxPoolSize=3 2024-12-09T00:46:08,303 DEBUG [RS:1;1bae0942de96:43375 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0, corePoolSize=3, maxPoolSize=3 2024-12-09T00:46:08,304 INFO [RS:0;1bae0942de96:43359 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,304 INFO [RS:0;1bae0942de96:43359 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,304 INFO [RS:0;1bae0942de96:43359 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:0;1bae0942de96:43359 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-09T00:46:08,305 INFO [RS:0;1bae0942de96:43359 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:0;1bae0942de96:43359 {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,43359,1733705166853-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:2;1bae0942de96:38143 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:1;1bae0942de96:43375 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:2;1bae0942de96:38143 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:1;1bae0942de96:43375 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:2;1bae0942de96:38143 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:1;1bae0942de96:43375 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:2;1bae0942de96:38143 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:1;1bae0942de96:43375 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:2;1bae0942de96:38143 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:1;1bae0942de96:43375 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:2;1bae0942de96:38143 {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,38143,1733705167014-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T00:46:08,305 INFO [RS:1;1bae0942de96:43375 {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,43375,1733705166954-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T00:46:08,326 INFO [RS:2;1bae0942de96:38143 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T00:46:08,326 INFO [RS:1;1bae0942de96:43375 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T00:46:08,328 INFO [RS:2;1bae0942de96:38143 {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,38143,1733705167014-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,328 INFO [RS:1;1bae0942de96:43375 {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,43375,1733705166954-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,329 INFO [RS:2;1bae0942de96:38143 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,329 INFO [RS:1;1bae0942de96:43375 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T00:46:08,329 INFO [RS:2;1bae0942de96:38143 {}] regionserver.Replication(171): 1bae0942de96,38143,1733705167014 started 2024-12-09T00:46:08,329 INFO [RS:1;1bae0942de96:43375 {}] regionserver.Replication(171): 1bae0942de96,43375,1733705166954 started 2024-12-09T00:46:08,331 INFO [RS:0;1bae0942de96:43359 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T00:46:08,332 INFO [RS:0;1bae0942de96:43359 {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,43359,1733705166853-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,332 INFO [RS:0;1bae0942de96:43359 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,332 INFO [RS:0;1bae0942de96:43359 {}] regionserver.Replication(171): 1bae0942de96,43359,1733705166853 started 2024-12-09T00:46:08,345 INFO [RS:2;1bae0942de96:38143 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,346 INFO [RS:2;1bae0942de96:38143 {}] regionserver.HRegionServer(1482): Serving as 1bae0942de96,38143,1733705167014, RpcServer on 1bae0942de96/172.17.0.2:38143, sessionid=0x100081728f20003 2024-12-09T00:46:08,346 INFO [RS:1;1bae0942de96:43375 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:08,346 INFO [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(1482): Serving as 1bae0942de96,43375,1733705166954, RpcServer on 1bae0942de96/172.17.0.2:43375, sessionid=0x100081728f20002 2024-12-09T00:46:08,346 DEBUG [RS:2;1bae0942de96:38143 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T00:46:08,346 DEBUG [RS:1;1bae0942de96:43375 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T00:46:08,347 DEBUG [RS:2;1bae0942de96:38143 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1bae0942de96,38143,1733705167014 2024-12-09T00:46:08,347 DEBUG [RS:1;1bae0942de96:43375 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1bae0942de96,43375,1733705166954 2024-12-09T00:46:08,347 DEBUG [RS:2;1bae0942de96:38143 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1bae0942de96,38143,1733705167014' 2024-12-09T00:46:08,347 DEBUG [RS:1;1bae0942de96:43375 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1bae0942de96,43375,1733705166954' 2024-12-09T00:46:08,347 DEBUG [RS:2;1bae0942de96:38143 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T00:46:08,347 DEBUG [RS:1;1bae0942de96:43375 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T00:46:08,348 DEBUG [RS:2;1bae0942de96:38143 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T00:46:08,348 DEBUG [RS:1;1bae0942de96:43375 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T00:46:08,348 DEBUG [RS:1;1bae0942de96:43375 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T00:46:08,348 DEBUG [RS:1;1bae0942de96:43375 {}] 
procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T00:46:08,349 DEBUG [RS:2;1bae0942de96:38143 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T00:46:08,349 DEBUG [RS:2;1bae0942de96:38143 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T00:46:08,349 DEBUG [RS:2;1bae0942de96:38143 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1bae0942de96,38143,1733705167014 2024-12-09T00:46:08,349 DEBUG [RS:1;1bae0942de96:43375 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1bae0942de96,43375,1733705166954 2024-12-09T00:46:08,349 DEBUG [RS:2;1bae0942de96:38143 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1bae0942de96,38143,1733705167014' 2024-12-09T00:46:08,349 DEBUG [RS:1;1bae0942de96:43375 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1bae0942de96,43375,1733705166954' 2024-12-09T00:46:08,349 DEBUG [RS:2;1bae0942de96:38143 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T00:46:08,349 DEBUG [RS:1;1bae0942de96:43375 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T00:46:08,349 DEBUG [RS:1;1bae0942de96:43375 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T00:46:08,349 DEBUG [RS:2;1bae0942de96:38143 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T00:46:08,350 DEBUG [RS:1;1bae0942de96:43375 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T00:46:08,350 DEBUG [RS:2;1bae0942de96:38143 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T00:46:08,350 INFO [RS:1;1bae0942de96:43375 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T00:46:08,350 INFO [RS:2;1bae0942de96:38143 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T00:46:08,350 INFO [RS:1;1bae0942de96:43375 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T00:46:08,350 INFO [RS:2;1bae0942de96:38143 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T00:46:08,354 INFO [RS:0;1bae0942de96:43359 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T00:46:08,354 INFO [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(1482): Serving as 1bae0942de96,43359,1733705166853, RpcServer on 1bae0942de96/172.17.0.2:43359, sessionid=0x100081728f20001 2024-12-09T00:46:08,354 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T00:46:08,354 DEBUG [RS:0;1bae0942de96:43359 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1bae0942de96,43359,1733705166853 2024-12-09T00:46:08,354 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1bae0942de96,43359,1733705166853' 2024-12-09T00:46:08,354 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T00:46:08,355 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T00:46:08,356 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T00:46:08,356 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T00:46:08,356 DEBUG [RS:0;1bae0942de96:43359 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1bae0942de96,43359,1733705166853 2024-12-09T00:46:08,356 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1bae0942de96,43359,1733705166853' 2024-12-09T00:46:08,356 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T00:46:08,357 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T00:46:08,358 DEBUG [RS:0;1bae0942de96:43359 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T00:46:08,358 INFO [RS:0;1bae0942de96:43359 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T00:46:08,358 INFO [RS:0;1bae0942de96:43359 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T00:46:08,377 WARN [1bae0942de96:46663 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-09T00:46:08,454 INFO [RS:1;1bae0942de96:43375 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T00:46:08,454 INFO [RS:2;1bae0942de96:38143 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T00:46:08,457 INFO [RS:1;1bae0942de96:43375 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1bae0942de96%2C43375%2C1733705166954, suffix=, logDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,43375,1733705166954, archiveDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/oldWALs, maxLogs=32 2024-12-09T00:46:08,457 INFO [RS:2;1bae0942de96:38143 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1bae0942de96%2C38143%2C1733705167014, suffix=, logDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,38143,1733705167014, archiveDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/oldWALs, maxLogs=32 2024-12-09T00:46:08,458 INFO [RS:0;1bae0942de96:43359 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T00:46:08,461 INFO [RS:0;1bae0942de96:43359 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1bae0942de96%2C43359%2C1733705166853, suffix=, logDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,43359,1733705166853, archiveDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/oldWALs, maxLogs=32 2024-12-09T00:46:08,475 DEBUG [RS:0;1bae0942de96:43359 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,43359,1733705166853/1bae0942de96%2C43359%2C1733705166853.1733705168463, exclude list is [], retry=0 2024-12-09T00:46:08,478 DEBUG [RS:1;1bae0942de96:43375 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,43375,1733705166954/1bae0942de96%2C43375%2C1733705166954.1733705168461, exclude list is [], retry=0 2024-12-09T00:46:08,479 DEBUG [RS:2;1bae0942de96:38143 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,38143,1733705167014/1bae0942de96%2C38143%2C1733705167014.1733705168461, exclude list is [], retry=0 2024-12-09T00:46:08,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39493,DS-24d373eb-b69d-4238-bc8a-61d991e9676c,DISK] 2024-12-09T00:46:08,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36967,DS-53fe86d1-a3b2-464e-8a83-720a8abf669a,DISK] 2024-12-09T00:46:08,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:36209,DS-4889d365-bc81-4b9e-bac9-b22158b815bd,DISK] 2024-12-09T00:46:08,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36209,DS-4889d365-bc81-4b9e-bac9-b22158b815bd,DISK] 2024-12-09T00:46:08,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39493,DS-24d373eb-b69d-4238-bc8a-61d991e9676c,DISK] 2024-12-09T00:46:08,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36967,DS-53fe86d1-a3b2-464e-8a83-720a8abf669a,DISK] 2024-12-09T00:46:08,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39493,DS-24d373eb-b69d-4238-bc8a-61d991e9676c,DISK] 2024-12-09T00:46:08,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36967,DS-53fe86d1-a3b2-464e-8a83-720a8abf669a,DISK] 2024-12-09T00:46:08,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36209,DS-4889d365-bc81-4b9e-bac9-b22158b815bd,DISK] 2024-12-09T00:46:08,499 INFO [RS:0;1bae0942de96:43359 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,43359,1733705166853/1bae0942de96%2C43359%2C1733705166853.1733705168463 2024-12-09T00:46:08,500 DEBUG [RS:0;1bae0942de96:43359 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44287:44287),(127.0.0.1/127.0.0.1:38503:38503),(127.0.0.1/127.0.0.1:36505:36505)] 2024-12-09T00:46:08,501 INFO [RS:1;1bae0942de96:43375 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,43375,1733705166954/1bae0942de96%2C43375%2C1733705166954.1733705168461 2024-12-09T00:46:08,502 DEBUG [RS:1;1bae0942de96:43375 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44287:44287),(127.0.0.1/127.0.0.1:38503:38503),(127.0.0.1/127.0.0.1:36505:36505)] 2024-12-09T00:46:08,502 INFO [RS:2;1bae0942de96:38143 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,38143,1733705167014/1bae0942de96%2C38143%2C1733705167014.1733705168461 2024-12-09T00:46:08,502 DEBUG [RS:2;1bae0942de96:38143 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36505:36505),(127.0.0.1/127.0.0.1:44287:44287),(127.0.0.1/127.0.0.1:38503:38503)] 2024-12-09T00:46:08,629 DEBUG [1bae0942de96:46663 {}] assignment.AssignmentManager(2472): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T00:46:08,635 DEBUG [1bae0942de96:46663 {}] balancer.BalancerClusterState(204): Hosts are {1bae0942de96=0} racks are {/default-rack=0} 2024-12-09T00:46:08,640 DEBUG [1bae0942de96:46663 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T00:46:08,640 DEBUG [1bae0942de96:46663 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T00:46:08,640 DEBUG [1bae0942de96:46663 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T00:46:08,640 DEBUG [1bae0942de96:46663 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T00:46:08,641 DEBUG [1bae0942de96:46663 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T00:46:08,641 DEBUG [1bae0942de96:46663 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T00:46:08,641 INFO [1bae0942de96:46663 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T00:46:08,641 INFO [1bae0942de96:46663 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T00:46:08,641 INFO [1bae0942de96:46663 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T00:46:08,641 DEBUG [1bae0942de96:46663 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T00:46:08,647 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1bae0942de96,43359,1733705166853 2024-12-09T00:46:08,652 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1bae0942de96,43359,1733705166853, state=OPENING 2024-12-09T00:46:08,697 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T00:46:08,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:08,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:08,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:08,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:08,707 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:08,707 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:08,707 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:08,708 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:08,711 DEBUG 
[PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T00:46:08,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1bae0942de96,43359,1733705166853}] 2024-12-09T00:46:08,893 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T00:46:08,895 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45773, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T00:46:08,906 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T00:46:08,907 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T00:46:08,907 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T00:46:08,911 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1bae0942de96%2C43359%2C1733705166853.meta, suffix=.meta, logDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,43359,1733705166853, archiveDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/oldWALs, maxLogs=32 2024-12-09T00:46:08,925 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,43359,1733705166853/1bae0942de96%2C43359%2C1733705166853.meta.1733705168912.meta, exclude list is [], retry=0 2024-12-09T00:46:08,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36967,DS-53fe86d1-a3b2-464e-8a83-720a8abf669a,DISK] 2024-12-09T00:46:08,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39493,DS-24d373eb-b69d-4238-bc8a-61d991e9676c,DISK] 2024-12-09T00:46:08,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36209,DS-4889d365-bc81-4b9e-bac9-b22158b815bd,DISK] 2024-12-09T00:46:08,932 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,43359,1733705166853/1bae0942de96%2C43359%2C1733705166853.meta.1733705168912.meta 2024-12-09T00:46:08,933 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38503:38503),(127.0.0.1/127.0.0.1:44287:44287),(127.0.0.1/127.0.0.1:36505:36505)] 2024-12-09T00:46:08,933 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T00:46:08,934 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T00:46:08,937 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T00:46:08,942 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T00:46:08,946 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T00:46:08,946 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T00:46:08,946 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T00:46:08,947 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T00:46:08,950 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T00:46:08,951 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T00:46:08,952 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:08,952 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:08,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T00:46:08,954 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T00:46:08,954 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:08,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:08,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T00:46:08,956 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T00:46:08,957 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:08,957 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:08,958 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T00:46:08,959 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T00:46:08,959 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:08,960 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:08,960 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T00:46:08,962 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740 2024-12-09T00:46:08,964 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740 2024-12-09T00:46:08,967 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T00:46:08,967 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T00:46:08,967 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-09T00:46:08,970 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T00:46:08,972 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58882338, jitterRate=-0.12258478999137878}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T00:46:08,972 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T00:46:08,974 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733705168947Writing region info on filesystem at 1733705168947Initializing all the Stores at 1733705168949 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733705168949Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733705168950 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733705168950Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733705168950Cleaning up temporary data from old regions at 1733705168967 (+17 ms)Running coprocessor post-open hooks at 1733705168972 (+5 ms)Region opened successfully at 1733705168974 (+2 ms) 2024-12-09T00:46:08,981 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733705168885 2024-12-09T00:46:08,991 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T00:46:08,991 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T00:46:08,993 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=1bae0942de96,43359,1733705166853 2024-12-09T00:46:08,995 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1bae0942de96,43359,1733705166853, state=OPEN 2024-12-09T00:46:09,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T00:46:09,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T00:46:09,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T00:46:09,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T00:46:09,031 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:09,031 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:09,031 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:09,031 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:09,032 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1bae0942de96,43359,1733705166853 2024-12-09T00:46:09,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T00:46:09,042 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1bae0942de96,43359,1733705166853 in 318 msec 2024-12-09T00:46:09,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T00:46:09,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 828 msec 2024-12-09T00:46:09,050 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T00:46:09,050 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T00:46:09,068 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T00:46:09,069 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=1bae0942de96,43359,1733705166853, seqNum=-1] 2024-12-09T00:46:09,087 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T00:46:09,089 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57999, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T00:46:09,130 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1490 sec 2024-12-09T00:46:09,130 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733705169130, completionTime=-1 2024-12-09T00:46:09,133 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T00:46:09,134 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T00:46:09,165 INFO [master/1bae0942de96:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T00:46:09,165 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733705229165 2024-12-09T00:46:09,165 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733705289165 2024-12-09T00:46:09,165 INFO [master/1bae0942de96:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 31 msec 2024-12-09T00:46:09,166 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T00:46:09,173 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,46663,1733705166187-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:09,174 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,46663,1733705166187-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:09,174 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,46663,1733705166187-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:09,175 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1bae0942de96:46663, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:09,176 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:09,176 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-09T00:46:09,183 DEBUG [master/1bae0942de96:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T00:46:09,205 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.097sec 2024-12-09T00:46:09,206 INFO [master/1bae0942de96:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T00:46:09,207 INFO [master/1bae0942de96:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T00:46:09,208 INFO [master/1bae0942de96:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T00:46:09,208 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T00:46:09,208 INFO [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T00:46:09,209 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,46663,1733705166187-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T00:46:09,210 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,46663,1733705166187-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T00:46:09,215 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T00:46:09,216 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T00:46:09,217 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,46663,1733705166187-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T00:46:09,256 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@710f4245, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T00:46:09,259 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T00:46:09,259 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T00:46:09,263 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1bae0942de96,46663,-1 for getting cluster id 2024-12-09T00:46:09,266 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T00:46:09,273 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '277868f4-deed-43be-b211-03676c4bb2e6' 2024-12-09T00:46:09,275 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T00:46:09,275 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "277868f4-deed-43be-b211-03676c4bb2e6" 2024-12-09T00:46:09,276 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22c83a5b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T00:46:09,276 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1bae0942de96,46663,-1] 2024-12-09T00:46:09,278 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T00:46:09,280 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:09,281 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50032, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T00:46:09,283 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b3a9a70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T00:46:09,284 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T00:46:09,290 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1bae0942de96,43359,1733705166853, seqNum=-1] 2024-12-09T00:46:09,291 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T00:46:09,293 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34584, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T00:46:09,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=1bae0942de96,46663,1733705166187 2024-12-09T00:46:09,315 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T00:46:09,319 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 1bae0942de96,46663,1733705166187 2024-12-09T00:46:09,321 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5b535543 2024-12-09T00:46:09,322 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T00:46:09,325 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50034, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T00:46:09,330 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T00:46:09,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-09T00:46:09,340 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T00:46:09,342 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-09T00:46:09,342 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:09,346 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T00:46:09,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T00:46:09,355 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:09,355 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T00:46:09,358 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006682308_22 at /127.0.0.1:58556 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:36209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58556 dst: /127.0.0.1:36209 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T00:46:09,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-09T00:46:09,365 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T00:46:09,368 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 158988fdf2b02313181214e78f95fcd2, NAME => 'TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3 2024-12-09T00:46:09,374 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:09,374 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T00:46:09,380 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006682308_22 at /127.0.0.1:35770 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35770 dst: /127.0.0.1:39493 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T00:46:09,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-09T00:46:09,389 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T00:46:09,390 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T00:46:09,390 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 158988fdf2b02313181214e78f95fcd2, disabling compactions & flushes 2024-12-09T00:46:09,390 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 2024-12-09T00:46:09,390 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 2024-12-09T00:46:09,390 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. after waiting 0 ms 2024-12-09T00:46:09,390 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 2024-12-09T00:46:09,391 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 
2024-12-09T00:46:09,391 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 158988fdf2b02313181214e78f95fcd2: Waiting for close lock at 1733705169390Disabling compacts and flushes for region at 1733705169390Disabling writes for close at 1733705169390Writing region close event to WAL at 1733705169390Closed at 1733705169390 2024-12-09T00:46:09,393 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T00:46:09,397 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733705169393"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733705169393"}]},"ts":"1733705169393"} 2024-12-09T00:46:09,402 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T00:46:09,404 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T00:46:09,406 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733705169404"}]},"ts":"1733705169404"} 2024-12-09T00:46:09,411 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-09T00:46:09,411 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {1bae0942de96=0} racks are {/default-rack=0} 2024-12-09T00:46:09,412 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T00:46:09,413 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T00:46:09,413 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T00:46:09,413 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T00:46:09,413 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T00:46:09,413 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T00:46:09,413 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T00:46:09,413 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T00:46:09,413 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T00:46:09,413 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T00:46:09,414 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=158988fdf2b02313181214e78f95fcd2, ASSIGN}] 2024-12-09T00:46:09,417 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=158988fdf2b02313181214e78f95fcd2, ASSIGN 2024-12-09T00:46:09,418 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=158988fdf2b02313181214e78f95fcd2, ASSIGN; state=OFFLINE, location=1bae0942de96,43375,1733705166954; forceNewPlan=false, retain=false 2024-12-09T00:46:09,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T00:46:09,572 INFO [1bae0942de96:46663 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T00:46:09,573 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=158988fdf2b02313181214e78f95fcd2, regionState=OPENING, regionLocation=1bae0942de96,43375,1733705166954 2024-12-09T00:46:09,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=158988fdf2b02313181214e78f95fcd2, ASSIGN because future has completed 2024-12-09T00:46:09,580 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 158988fdf2b02313181214e78f95fcd2, server=1bae0942de96,43375,1733705166954}] 2024-12-09T00:46:09,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T00:46:09,735 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T00:46:09,737 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50707, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T00:46:09,745 INFO [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 
2024-12-09T00:46:09,745 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 158988fdf2b02313181214e78f95fcd2, NAME => 'TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2.', STARTKEY => '', ENDKEY => ''} 2024-12-09T00:46:09,746 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 158988fdf2b02313181214e78f95fcd2 2024-12-09T00:46:09,746 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T00:46:09,746 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 158988fdf2b02313181214e78f95fcd2 2024-12-09T00:46:09,746 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 158988fdf2b02313181214e78f95fcd2 2024-12-09T00:46:09,749 INFO [StoreOpener-158988fdf2b02313181214e78f95fcd2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 158988fdf2b02313181214e78f95fcd2 2024-12-09T00:46:09,751 INFO [StoreOpener-158988fdf2b02313181214e78f95fcd2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 158988fdf2b02313181214e78f95fcd2 columnFamilyName cf 2024-12-09T00:46:09,751 DEBUG [StoreOpener-158988fdf2b02313181214e78f95fcd2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:09,752 INFO [StoreOpener-158988fdf2b02313181214e78f95fcd2-1 {}] regionserver.HStore(327): Store=158988fdf2b02313181214e78f95fcd2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T00:46:09,752 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 158988fdf2b02313181214e78f95fcd2 2024-12-09T00:46:09,753 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/default/TestHBaseWalOnEC/158988fdf2b02313181214e78f95fcd2 2024-12-09T00:46:09,754 DEBUG 
[RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/default/TestHBaseWalOnEC/158988fdf2b02313181214e78f95fcd2 2024-12-09T00:46:09,755 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 158988fdf2b02313181214e78f95fcd2 2024-12-09T00:46:09,755 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 158988fdf2b02313181214e78f95fcd2 2024-12-09T00:46:09,758 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 158988fdf2b02313181214e78f95fcd2 2024-12-09T00:46:09,764 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/default/TestHBaseWalOnEC/158988fdf2b02313181214e78f95fcd2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T00:46:09,765 INFO [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 158988fdf2b02313181214e78f95fcd2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63938371, jitterRate=-0.04724402725696564}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T00:46:09,765 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 158988fdf2b02313181214e78f95fcd2 2024-12-09T00:46:09,766 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 158988fdf2b02313181214e78f95fcd2: Running coprocessor pre-open hook at 1733705169746Writing region info on filesystem at 1733705169746Initializing all the Stores at 1733705169748 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733705169748Cleaning up temporary data from old regions at 1733705169755 (+7 ms)Running coprocessor post-open hooks at 1733705169765 (+10 ms)Region opened successfully at 1733705169766 (+1 ms) 2024-12-09T00:46:09,768 INFO [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2., pid=6, masterSystemTime=1733705169734 2024-12-09T00:46:09,772 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 2024-12-09T00:46:09,772 INFO [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 
2024-12-09T00:46:09,773 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=158988fdf2b02313181214e78f95fcd2, regionState=OPEN, openSeqNum=2, regionLocation=1bae0942de96,43375,1733705166954 2024-12-09T00:46:09,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 158988fdf2b02313181214e78f95fcd2, server=1bae0942de96,43375,1733705166954 because future has completed 2024-12-09T00:46:09,786 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T00:46:09,787 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 158988fdf2b02313181214e78f95fcd2, server=1bae0942de96,43375,1733705166954 in 202 msec 2024-12-09T00:46:09,789 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T00:46:09,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=158988fdf2b02313181214e78f95fcd2, ASSIGN in 372 msec 2024-12-09T00:46:09,791 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T00:46:09,791 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733705169791"}]},"ts":"1733705169791"} 2024-12-09T00:46:09,794 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-09T00:46:09,796 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T00:46:09,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 463 msec 2024-12-09T00:46:09,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T00:46:09,980 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-09T00:46:09,980 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T00:46:09,981 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T00:46:09,986 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-09T00:46:09,986 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T00:46:09,987 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
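The CreateTableProcedure entries above walk TestHBaseWalOnEC through ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE and POST_OPERATION before the client sees the CREATE operation complete. For reference, a hedged sketch of the client-side call that would drive such a procedure via the HBase Admin API is shown below; only the table name and the single column family cf come from the log, the connection setup is assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml for the mini cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build();
          // Kicks off the CreateTableProcedure recorded as pid=4 in the master log above.
          admin.createTable(desc);
        }
      }
    }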
2024-12-09T00:46:09,995 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2., hostname=1bae0942de96,43375,1733705166954, seqNum=2] 2024-12-09T00:46:09,996 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T00:46:09,998 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39754, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T00:46:10,006 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-09T00:46:10,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-09T00:46:10,013 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-09T00:46:10,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T00:46:10,014 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T00:46:10,016 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T00:46:10,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T00:46:10,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43375 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T00:46:10,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 
2024-12-09T00:46:10,189 INFO [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 158988fdf2b02313181214e78f95fcd2 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-09T00:46:10,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/default/TestHBaseWalOnEC/158988fdf2b02313181214e78f95fcd2/.tmp/cf/1a86ba6f0de748d08a4e65a436c017a4 is 36, key is row/cf:cq/1733705169999/Put/seqid=0 2024-12-09T00:46:10,242 WARN [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:10,243 WARN [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:10,246 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-176595700_22 at /127.0.0.1:58576 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:36209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58576 dst: /127.0.0.1:36209 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T00:46:10,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-09T00:46:10,251 WARN [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T00:46:10,252 INFO [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/default/TestHBaseWalOnEC/158988fdf2b02313181214e78f95fcd2/.tmp/cf/1a86ba6f0de748d08a4e65a436c017a4 2024-12-09T00:46:10,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/default/TestHBaseWalOnEC/158988fdf2b02313181214e78f95fcd2/.tmp/cf/1a86ba6f0de748d08a4e65a436c017a4 as hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/default/TestHBaseWalOnEC/158988fdf2b02313181214e78f95fcd2/cf/1a86ba6f0de748d08a4e65a436c017a4 2024-12-09T00:46:10,307 INFO [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/default/TestHBaseWalOnEC/158988fdf2b02313181214e78f95fcd2/cf/1a86ba6f0de748d08a4e65a436c017a4, entries=1, sequenceid=5, filesize=4.7 K 2024-12-09T00:46:10,315 INFO [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 158988fdf2b02313181214e78f95fcd2 in 125ms, sequenceid=5, compaction requested=false 2024-12-09T00:46:10,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-09T00:46:10,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 158988fdf2b02313181214e78f95fcd2: 2024-12-09T00:46:10,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 
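The flush above (pid=7/8) moves a single 32-byte cell, keyed row/cf:cq per the HFileWriterImpl line, from the memstore into HFile 1a86ba6f0de748d08a4e65a436c017a4. A minimal client-side sketch that would produce this write-then-flush pattern is below; the cell value and the connection details are assumptions, only the row key, family, qualifier and table name appear in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml for the mini cluster
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          // Write the single cell seen in the log: row "row", family "cf", qualifier "cq".
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Trigger the FlushTableProcedure (pid=7 above) so the cell is persisted to an HFile.
          admin.flush(tn);
        }
      }
    }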
2024-12-09T00:46:10,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T00:46:10,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T00:46:10,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T00:46:10,328 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T00:46:10,328 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 309 msec 2024-12-09T00:46:10,333 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 323 msec 2024-12-09T00:46:10,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46663 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T00:46:10,639 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T00:46:10,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T00:46:10,659 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T00:46:10,659 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T00:46:10,663 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:10,663 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:10,663 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T00:46:10,663 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T00:46:10,664 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1099508225, stopped=false 2024-12-09T00:46:10,664 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1bae0942de96,46663,1733705166187 2024-12-09T00:46:10,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:10,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:10,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:10,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:10,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:10,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:10,722 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T00:46:10,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:10,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:10,722 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T00:46:10,722 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T00:46:10,722 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:10,722 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:10,722 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:10,722 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:10,722 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:10,723 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1bae0942de96,43359,1733705166853' ***** 2024-12-09T00:46:10,723 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T00:46:10,723 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1bae0942de96,43375,1733705166954' ***** 2024-12-09T00:46:10,723 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T00:46:10,723 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1bae0942de96,38143,1733705167014' ***** 2024-12-09T00:46:10,723 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T00:46:10,723 INFO [RS:0;1bae0942de96:43359 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T00:46:10,724 INFO [RS:1;1bae0942de96:43375 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T00:46:10,724 INFO [RS:2;1bae0942de96:38143 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T00:46:10,724 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T00:46:10,724 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T00:46:10,724 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T00:46:10,724 INFO [RS:0;1bae0942de96:43359 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T00:46:10,724 INFO [RS:2;1bae0942de96:38143 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T00:46:10,724 INFO [RS:1;1bae0942de96:43375 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T00:46:10,724 INFO [RS:0;1bae0942de96:43359 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T00:46:10,724 INFO [RS:2;1bae0942de96:38143 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-09T00:46:10,724 INFO [RS:1;1bae0942de96:43375 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T00:46:10,724 INFO [RS:2;1bae0942de96:38143 {}] regionserver.HRegionServer(959): stopping server 1bae0942de96,38143,1733705167014 2024-12-09T00:46:10,724 INFO [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(959): stopping server 1bae0942de96,43359,1733705166853 2024-12-09T00:46:10,724 INFO [RS:2;1bae0942de96:38143 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T00:46:10,724 INFO [RS:0;1bae0942de96:43359 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T00:46:10,724 INFO [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(3091): Received CLOSE for 158988fdf2b02313181214e78f95fcd2 2024-12-09T00:46:10,724 INFO [RS:0;1bae0942de96:43359 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1bae0942de96:43359. 2024-12-09T00:46:10,724 INFO [RS:2;1bae0942de96:38143 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;1bae0942de96:38143. 2024-12-09T00:46:10,724 DEBUG [RS:0;1bae0942de96:43359 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T00:46:10,725 DEBUG [RS:2;1bae0942de96:38143 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at 
java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T00:46:10,725 DEBUG [RS:0;1bae0942de96:43359 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:10,725 DEBUG [RS:2;1bae0942de96:38143 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:10,725 INFO [RS:2;1bae0942de96:38143 {}] regionserver.HRegionServer(976): stopping server 1bae0942de96,38143,1733705167014; all regions closed. 2024-12-09T00:46:10,725 INFO [RS:0;1bae0942de96:43359 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T00:46:10,725 INFO [RS:0;1bae0942de96:43359 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T00:46:10,725 INFO [RS:0;1bae0942de96:43359 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T00:46:10,725 INFO [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T00:46:10,725 INFO [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(959): stopping server 1bae0942de96,43375,1733705166954 2024-12-09T00:46:10,725 INFO [RS:1;1bae0942de96:43375 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T00:46:10,725 INFO [RS:1;1bae0942de96:43375 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;1bae0942de96:43375. 2024-12-09T00:46:10,725 DEBUG [RS:1;1bae0942de96:43375 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T00:46:10,725 DEBUG [RS:1;1bae0942de96:43375 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:10,725 INFO [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T00:46:10,725 INFO [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T00:46:10,725 DEBUG [RS:0;1bae0942de96:43359 {}] 
regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T00:46:10,725 DEBUG [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(1325): Online Regions={158988fdf2b02313181214e78f95fcd2=TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2.} 2024-12-09T00:46:10,726 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 158988fdf2b02313181214e78f95fcd2, disabling compactions & flushes 2024-12-09T00:46:10,726 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T00:46:10,726 INFO [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 2024-12-09T00:46:10,726 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T00:46:10,726 DEBUG [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(1351): Waiting on 158988fdf2b02313181214e78f95fcd2 2024-12-09T00:46:10,726 DEBUG [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T00:46:10,726 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 2024-12-09T00:46:10,726 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T00:46:10,726 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T00:46:10,726 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. after waiting 0 ms 2024-12-09T00:46:10,726 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T00:46:10,726 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 
2024-12-09T00:46:10,726 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-09T00:46:10,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_1073741828_1018 (size=93) 2024-12-09T00:46:10,732 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,38143,1733705167014/1bae0942de96%2C38143%2C1733705167014.1733705168461 not finished, retry = 0 2024-12-09T00:46:10,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741828_1018 (size=93) 2024-12-09T00:46:10,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741828_1018 (size=93) 2024-12-09T00:46:10,744 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/default/TestHBaseWalOnEC/158988fdf2b02313181214e78f95fcd2/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T00:46:10,747 INFO [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 2024-12-09T00:46:10,747 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 158988fdf2b02313181214e78f95fcd2: Waiting for close lock at 1733705170725Running coprocessor pre-close hooks at 1733705170726 (+1 ms)Disabling compacts and flushes for region at 1733705170726Disabling writes for close at 1733705170726Writing region close event to WAL at 1733705170727 (+1 ms)Running coprocessor post-close hooks at 1733705170745 (+18 ms)Closed at 1733705170747 (+2 ms) 2024-12-09T00:46:10,747 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2. 2024-12-09T00:46:10,760 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/.tmp/info/3017b8a37ba444f1a56bbbfd7f349b4c is 153, key is TestHBaseWalOnEC,,1733705169326.158988fdf2b02313181214e78f95fcd2./info:regioninfo/1733705169773/Put/seqid=0 2024-12-09T00:46:10,763 WARN [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:10,764 WARN [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:10,767 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1391166198_22 at /127.0.0.1:56866 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:36967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56866 dst: /127.0.0.1:36967 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T00:46:10,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-09T00:46:10,773 WARN [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T00:46:10,773 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/.tmp/info/3017b8a37ba444f1a56bbbfd7f349b4c 2024-12-09T00:46:10,804 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/.tmp/ns/0c19b70c4e7e42e0b75d97595a00963e is 43, key is default/ns:d/1733705169093/Put/seqid=0 2024-12-09T00:46:10,807 WARN [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:10,807 WARN [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:10,810 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1391166198_22 at /127.0.0.1:56894 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:36967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56894 dst: /127.0.0.1:36967 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T00:46:10,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-09T00:46:10,815 WARN [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T00:46:10,815 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/.tmp/ns/0c19b70c4e7e42e0b75d97595a00963e 2024-12-09T00:46:10,817 INFO [regionserver/1bae0942de96:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T00:46:10,817 INFO [regionserver/1bae0942de96:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T00:46:10,818 INFO [regionserver/1bae0942de96:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T00:46:10,837 DEBUG [RS:2;1bae0942de96:38143 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/oldWALs 2024-12-09T00:46:10,838 INFO [RS:2;1bae0942de96:38143 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 1bae0942de96%2C38143%2C1733705167014:(num 1733705168461) 2024-12-09T00:46:10,838 DEBUG [RS:2;1bae0942de96:38143 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:10,838 INFO [RS:2;1bae0942de96:38143 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T00:46:10,838 INFO [RS:2;1bae0942de96:38143 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T00:46:10,838 INFO [RS:2;1bae0942de96:38143 {}] hbase.ChoreService(370): Chore service for: regionserver/1bae0942de96:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T00:46:10,838 INFO [RS:2;1bae0942de96:38143 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T00:46:10,838 INFO [RS:2;1bae0942de96:38143 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T00:46:10,838 INFO [regionserver/1bae0942de96:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T00:46:10,839 INFO [RS:2;1bae0942de96:38143 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T00:46:10,839 INFO [RS:2;1bae0942de96:38143 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T00:46:10,839 INFO [RS:2;1bae0942de96:38143 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38143 2024-12-09T00:46:10,840 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/.tmp/table/f09b0358d5cd41488dda6451eafb0708 is 52, key is TestHBaseWalOnEC/table:state/1733705169791/Put/seqid=0 2024-12-09T00:46:10,843 WARN [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:10,843 WARN [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. 
There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:10,845 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1391166198_22 at /127.0.0.1:58598 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:36209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58598 dst: /127.0.0.1:36209 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T00:46:10,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-09T00:46:10,851 WARN [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
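Note on the recurring warning above: the RS-3-2-1024k policy stripes each block group across 3 data and 2 parity blocks, which needs 5 distinct datanodes, while this mini cluster runs only 3; parity indexes 3 and 4 therefore cannot be placed and every striped write is logged as "at high risk of losing data". Besides the 'hdfs ec -verifyClusterSetup' command quoted in the warning, the same mismatch can be checked programmatically. A minimal sketch follows; the target path "/hbase" and the class/variable names are illustrative assumptions, not taken from this test.

    // Sketch only: compare a path's erasure-coding policy against the live datanode count.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcFitCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();                   // reads core-site.xml/hdfs-site.xml from the classpath
        Path dir = new Path(args.length > 0 ? args[0] : "/hbase");  // assumed target directory
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;   // assumes fs.defaultFS points at HDFS
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir); // null means plain replication
          int liveNodes = dfs.getDataNodeStats().length;
          if (policy != null) {
            int needed = policy.getNumDataUnits() + policy.getNumParityUnits(); // e.g. 3 + 2 for RS-3-2-1024k
            System.out.printf("%s: policy=%s needs %d datanodes, cluster has %d%n",
                dir, policy.getName(), needed, liveNodes);
          } else {
            System.out.println(dir + " uses plain replication");
          }
        }
      }
    }

If "needed" exceeds the live datanode count (5 > 3 here), allocation warnings like the ones above are expected for every striped block group.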
2024-12-09T00:46:10,851 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/.tmp/table/f09b0358d5cd41488dda6451eafb0708 2024-12-09T00:46:10,861 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/.tmp/info/3017b8a37ba444f1a56bbbfd7f349b4c as hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/info/3017b8a37ba444f1a56bbbfd7f349b4c 2024-12-09T00:46:10,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1bae0942de96,38143,1733705167014 2024-12-09T00:46:10,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T00:46:10,871 INFO [RS:2;1bae0942de96:38143 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T00:46:10,871 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/info/3017b8a37ba444f1a56bbbfd7f349b4c, entries=10, sequenceid=11, filesize=6.5 K 2024-12-09T00:46:10,872 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1bae0942de96,38143,1733705167014] 2024-12-09T00:46:10,873 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/.tmp/ns/0c19b70c4e7e42e0b75d97595a00963e as hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/ns/0c19b70c4e7e42e0b75d97595a00963e 2024-12-09T00:46:10,882 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/ns/0c19b70c4e7e42e0b75d97595a00963e, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T00:46:10,884 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/.tmp/table/f09b0358d5cd41488dda6451eafb0708 as hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/table/f09b0358d5cd41488dda6451eafb0708 2024-12-09T00:46:10,888 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1bae0942de96,38143,1733705167014 already deleted, retry=false 2024-12-09T00:46:10,888 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 
1bae0942de96,38143,1733705167014 expired; onlineServers=2 2024-12-09T00:46:10,893 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/table/f09b0358d5cd41488dda6451eafb0708, entries=2, sequenceid=11, filesize=5.1 K 2024-12-09T00:46:10,895 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 169ms, sequenceid=11, compaction requested=false 2024-12-09T00:46:10,895 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T00:46:10,903 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T00:46:10,904 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T00:46:10,904 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T00:46:10,904 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733705170725Running coprocessor pre-close hooks at 1733705170726 (+1 ms)Disabling compacts and flushes for region at 1733705170726Disabling writes for close at 1733705170726Obtaining lock to block concurrent updates at 1733705170726Preparing flush snapshotting stores in 1588230740 at 1733705170726Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733705170727 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733705170728 (+1 ms)Flushing 1588230740/info: creating writer at 1733705170728Flushing 1588230740/info: appending metadata at 1733705170757 (+29 ms)Flushing 1588230740/info: closing flushed file at 1733705170757Flushing 1588230740/ns: creating writer at 1733705170784 (+27 ms)Flushing 1588230740/ns: appending metadata at 1733705170803 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1733705170803Flushing 1588230740/table: creating writer at 1733705170824 (+21 ms)Flushing 1588230740/table: appending metadata at 1733705170839 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733705170840 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5457c3c7: reopening flushed file at 1733705170860 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c10f547: reopening flushed file at 1733705170872 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@53cd29fb: reopening flushed file at 1733705170883 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 169ms, sequenceid=11, compaction requested=false at 1733705170895 (+12 ms)Writing region close event to WAL at 1733705170897 (+2 ms)Running 
coprocessor post-close hooks at 1733705170904 (+7 ms)Closed at 1733705170904 2024-12-09T00:46:10,905 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T00:46:10,926 INFO [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(976): stopping server 1bae0942de96,43375,1733705166954; all regions closed. 2024-12-09T00:46:10,926 INFO [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(976): stopping server 1bae0942de96,43359,1733705166853; all regions closed. 2024-12-09T00:46:10,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741829_1019 (size=2751) 2024-12-09T00:46:10,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_1073741829_1019 (size=2751) 2024-12-09T00:46:10,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741829_1019 (size=2751) 2024-12-09T00:46:10,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741827_1017 (size=1298) 2024-12-09T00:46:10,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741827_1017 (size=1298) 2024-12-09T00:46:10,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_1073741827_1017 (size=1298) 2024-12-09T00:46:10,936 DEBUG [RS:0;1bae0942de96:43359 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/oldWALs 2024-12-09T00:46:10,936 INFO [RS:0;1bae0942de96:43359 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 1bae0942de96%2C43359%2C1733705166853.meta:.meta(num 1733705168912) 2024-12-09T00:46:10,937 DEBUG [RS:1;1bae0942de96:43375 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/oldWALs 2024-12-09T00:46:10,937 INFO [RS:1;1bae0942de96:43375 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 1bae0942de96%2C43375%2C1733705166954:(num 1733705168461) 2024-12-09T00:46:10,937 DEBUG [RS:1;1bae0942de96:43375 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:10,937 INFO [RS:1;1bae0942de96:43375 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T00:46:10,937 INFO [RS:1;1bae0942de96:43375 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T00:46:10,937 INFO [RS:1;1bae0942de96:43375 {}] hbase.ChoreService(370): Chore service for: regionserver/1bae0942de96:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T00:46:10,938 INFO [RS:1;1bae0942de96:43375 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T00:46:10,938 INFO [RS:1;1bae0942de96:43375 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T00:46:10,938 INFO [regionserver/1bae0942de96:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
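The meta region flush recorded above is driven by the close path (event_type=M_RS_CLOSE_META): each column family's memstore is written to a .tmp file, committed into its store directory, and a close marker is then written under recovered.edits. The same kind of flush can also be requested explicitly through the client Admin API; a minimal sketch under assumed configuration handling (none of this is taken from the test itself):

    // Sketch: explicitly flushing hbase:meta through the Admin API.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushMeta {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Writes the memstores of hbase:meta out as new store files,
          // the same flush mechanism the DefaultStoreFlusher lines above record.
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }
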
2024-12-09T00:46:10,938 INFO [RS:1;1bae0942de96:43375 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T00:46:10,938 INFO [RS:1;1bae0942de96:43375 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T00:46:10,938 INFO [RS:1;1bae0942de96:43375 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43375 2024-12-09T00:46:10,941 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/WALs/1bae0942de96,43359,1733705166853/1bae0942de96%2C43359%2C1733705166853.1733705168463 not finished, retry = 0 2024-12-09T00:46:10,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_1073741826_1016 (size=93) 2024-12-09T00:46:10,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741826_1016 (size=93) 2024-12-09T00:46:10,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741826_1016 (size=93) 2024-12-09T00:46:10,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T00:46:10,963 INFO [RS:1;1bae0942de96:43375 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T00:46:10,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1bae0942de96,43375,1733705166954 2024-12-09T00:46:10,967 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1bae0942de96,43375,1733705166954] 2024-12-09T00:46:10,979 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1bae0942de96,43375,1733705166954 already deleted, retry=false 2024-12-09T00:46:10,979 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1bae0942de96,43375,1733705166954 expired; onlineServers=1 2024-12-09T00:46:10,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:10,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38143-0x100081728f20003, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:10,980 INFO [RS:2;1bae0942de96:38143 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T00:46:10,981 INFO [RS:2;1bae0942de96:38143 {}] regionserver.HRegionServer(1031): Exiting; stopping=1bae0942de96,38143,1733705167014; zookeeper connection closed. 
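Background for the ZooKeeper events above: each regionserver registers an ephemeral znode under /hbase/rs, so when it closes its ZooKeeper connection during shutdown the znode vanishes and the master's RegionServerTracker receives the NodeDeleted/NodeChildrenChanged events and marks the server expired. A minimal sketch of the underlying ephemeral-node mechanism; the quorum string and paths are placeholders, not the values from this test:

    // Sketch of an ephemeral "liveness" znode; connection details are placeholders
    // and the parent path is assumed to already exist.
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralMember {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
        // Ephemeral znode: it exists only while this session is alive.
        String path = zk.create("/demo/rs/member-", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
        System.out.println("registered " + path);
        // Closing the session (or crashing) removes the znode; a watcher on the
        // parent then sees NodeChildrenChanged plus NodeDeleted for the member path.
        zk.close();
      }
    }
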
2024-12-09T00:46:10,981 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2a5accaf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2a5accaf 2024-12-09T00:46:11,049 DEBUG [RS:0;1bae0942de96:43359 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/oldWALs 2024-12-09T00:46:11,049 INFO [RS:0;1bae0942de96:43359 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 1bae0942de96%2C43359%2C1733705166853:(num 1733705168463) 2024-12-09T00:46:11,049 DEBUG [RS:0;1bae0942de96:43359 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:11,049 INFO [RS:0;1bae0942de96:43359 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T00:46:11,049 INFO [RS:0;1bae0942de96:43359 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T00:46:11,049 INFO [RS:0;1bae0942de96:43359 {}] hbase.ChoreService(370): Chore service for: regionserver/1bae0942de96:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T00:46:11,050 INFO [RS:0;1bae0942de96:43359 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T00:46:11,050 INFO [regionserver/1bae0942de96:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T00:46:11,050 INFO [RS:0;1bae0942de96:43359 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43359 2024-12-09T00:46:11,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T00:46:11,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1bae0942de96,43359,1733705166853 2024-12-09T00:46:11,063 INFO [RS:0;1bae0942de96:43359 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T00:46:11,071 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1bae0942de96,43359,1733705166853] 2024-12-09T00:46:11,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:11,071 INFO [RS:1;1bae0942de96:43375 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T00:46:11,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43375-0x100081728f20002, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:11,071 INFO [RS:1;1bae0942de96:43375 {}] regionserver.HRegionServer(1031): Exiting; stopping=1bae0942de96,43375,1733705166954; zookeeper connection closed. 
2024-12-09T00:46:11,072 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@66a8e025 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@66a8e025 2024-12-09T00:46:11,079 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1bae0942de96,43359,1733705166853 already deleted, retry=false 2024-12-09T00:46:11,080 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1bae0942de96,43359,1733705166853 expired; onlineServers=0 2024-12-09T00:46:11,080 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1bae0942de96,46663,1733705166187' ***** 2024-12-09T00:46:11,080 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T00:46:11,080 INFO [M:0;1bae0942de96:46663 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T00:46:11,080 INFO [M:0;1bae0942de96:46663 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T00:46:11,081 DEBUG [M:0;1bae0942de96:46663 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T00:46:11,081 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-09T00:46:11,081 DEBUG [M:0;1bae0942de96:46663 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T00:46:11,081 DEBUG [master/1bae0942de96:0:becomeActiveMaster-HFileCleaner.small.0-1733705168059 {}] cleaner.HFileCleaner(306): Exit Thread[master/1bae0942de96:0:becomeActiveMaster-HFileCleaner.small.0-1733705168059,5,FailOnTimeoutGroup] 2024-12-09T00:46:11,081 DEBUG [master/1bae0942de96:0:becomeActiveMaster-HFileCleaner.large.0-1733705168058 {}] cleaner.HFileCleaner(306): Exit Thread[master/1bae0942de96:0:becomeActiveMaster-HFileCleaner.large.0-1733705168058,5,FailOnTimeoutGroup] 2024-12-09T00:46:11,081 INFO [M:0;1bae0942de96:46663 {}] hbase.ChoreService(370): Chore service for: master/1bae0942de96:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T00:46:11,082 INFO [M:0;1bae0942de96:46663 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T00:46:11,082 DEBUG [M:0;1bae0942de96:46663 {}] master.HMaster(1795): Stopping service threads 2024-12-09T00:46:11,082 INFO [M:0;1bae0942de96:46663 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T00:46:11,082 INFO [M:0;1bae0942de96:46663 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T00:46:11,083 INFO [M:0;1bae0942de96:46663 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T00:46:11,083 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-09T00:46:11,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T00:46:11,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:11,088 DEBUG [M:0;1bae0942de96:46663 {}] zookeeper.ZKUtil(347): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T00:46:11,088 WARN [M:0;1bae0942de96:46663 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T00:46:11,089 INFO [M:0;1bae0942de96:46663 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/.lastflushedseqids 2024-12-09T00:46:11,099 WARN [M:0;1bae0942de96:46663 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:11,099 WARN [M:0;1bae0942de96:46663 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:11,101 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006682308_22 at /127.0.0.1:51192 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51192 dst: /127.0.0.1:39493 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T00:46:11,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-09T00:46:11,106 WARN [M:0;1bae0942de96:46663 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T00:46:11,106 INFO [M:0;1bae0942de96:46663 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T00:46:11,106 INFO [M:0;1bae0942de96:46663 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T00:46:11,106 DEBUG [M:0;1bae0942de96:46663 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T00:46:11,107 INFO [M:0;1bae0942de96:46663 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T00:46:11,107 DEBUG [M:0;1bae0942de96:46663 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T00:46:11,107 DEBUG [M:0;1bae0942de96:46663 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T00:46:11,107 DEBUG [M:0;1bae0942de96:46663 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T00:46:11,107 INFO [M:0;1bae0942de96:46663 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-09T00:46:11,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-09T00:46:11,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-09T00:46:11,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-09T00:46:11,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-09T00:46:11,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-09T00:46:11,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-09T00:46:11,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-09T00:46:11,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-09T00:46:11,138 DEBUG [M:0;1bae0942de96:46663 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e04dc6a8086e45839d426c2de3505506 is 82, key is 
hbase:meta,,1/info:regioninfo/1733705168992/Put/seqid=0 2024-12-09T00:46:11,141 WARN [M:0;1bae0942de96:46663 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:11,141 WARN [M:0;1bae0942de96:46663 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:11,143 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006682308_22 at /127.0.0.1:45812 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:36967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45812 dst: /127.0.0.1:36967 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T00:46:11,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-09T00:46:11,147 WARN [M:0;1bae0942de96:46663 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T00:46:11,148 INFO [M:0;1bae0942de96:46663 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e04dc6a8086e45839d426c2de3505506 2024-12-09T00:46:11,171 INFO [RS:0;1bae0942de96:43359 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T00:46:11,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:11,171 INFO [RS:0;1bae0942de96:43359 {}] regionserver.HRegionServer(1031): Exiting; stopping=1bae0942de96,43359,1733705166853; zookeeper connection closed. 2024-12-09T00:46:11,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43359-0x100081728f20001, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:11,172 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2ac161c8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2ac161c8 2024-12-09T00:46:11,172 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T00:46:11,173 DEBUG [M:0;1bae0942de96:46663 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/905ee1e5592141c485906b6054173868 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733705169798/Put/seqid=0 2024-12-09T00:46:11,175 WARN [M:0;1bae0942de96:46663 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:11,175 WARN [M:0;1bae0942de96:46663 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:11,178 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006682308_22 at /127.0.0.1:45832 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:36967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45832 dst: /127.0.0.1:36967 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T00:46:11,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_-9223372036854775552_1037 (size=6440) 2024-12-09T00:46:11,182 WARN [M:0;1bae0942de96:46663 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T00:46:11,183 INFO [M:0;1bae0942de96:46663 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/905ee1e5592141c485906b6054173868 2024-12-09T00:46:11,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-09T00:46:11,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-09T00:46:11,203 DEBUG [M:0;1bae0942de96:46663 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/330f53e33b684c26908c9c1fe105d5c7 is 69, key is 1bae0942de96,38143,1733705167014/rs:state/1733705168185/Put/seqid=0 2024-12-09T00:46:11,206 WARN [M:0;1bae0942de96:46663 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:11,206 WARN [M:0;1bae0942de96:46663 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T00:46:11,208 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006682308_22 at /127.0.0.1:51268 [Receiving block BP-542836953-172.17.0.2-1733705162438:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51268 dst: /127.0.0.1:39493 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T00:46:11,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-09T00:46:11,213 WARN [M:0;1bae0942de96:46663 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T00:46:11,213 INFO [M:0;1bae0942de96:46663 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/330f53e33b684c26908c9c1fe105d5c7 2024-12-09T00:46:11,221 DEBUG [M:0;1bae0942de96:46663 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e04dc6a8086e45839d426c2de3505506 as hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e04dc6a8086e45839d426c2de3505506 2024-12-09T00:46:11,230 INFO [M:0;1bae0942de96:46663 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e04dc6a8086e45839d426c2de3505506, entries=8, sequenceid=72, filesize=5.5 K 2024-12-09T00:46:11,232 DEBUG [M:0;1bae0942de96:46663 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/905ee1e5592141c485906b6054173868 as hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/905ee1e5592141c485906b6054173868 2024-12-09T00:46:11,240 INFO [M:0;1bae0942de96:46663 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/905ee1e5592141c485906b6054173868, entries=8, sequenceid=72, filesize=6.3 K 2024-12-09T00:46:11,241 DEBUG [M:0;1bae0942de96:46663 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/330f53e33b684c26908c9c1fe105d5c7 as 
hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/330f53e33b684c26908c9c1fe105d5c7 2024-12-09T00:46:11,249 INFO [M:0;1bae0942de96:46663 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/330f53e33b684c26908c9c1fe105d5c7, entries=3, sequenceid=72, filesize=5.2 K 2024-12-09T00:46:11,251 INFO [M:0;1bae0942de96:46663 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 144ms, sequenceid=72, compaction requested=false 2024-12-09T00:46:11,252 INFO [M:0;1bae0942de96:46663 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T00:46:11,252 DEBUG [M:0;1bae0942de96:46663 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733705171106Disabling compacts and flushes for region at 1733705171106Disabling writes for close at 1733705171107 (+1 ms)Obtaining lock to block concurrent updates at 1733705171107Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733705171107Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733705171108 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733705171109 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733705171109Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733705171138 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733705171138Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733705171156 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733705171173 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733705171173Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733705171190 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733705171203 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733705171203Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65227936: reopening flushed file at 1733705171220 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@361f54aa: reopening flushed file at 1733705171230 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11f4b8f3: reopening flushed file at 1733705171240 (+10 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 144ms, sequenceid=72, compaction requested=false at 1733705171251 (+11 ms)Writing region close event to WAL at 1733705171252 (+1 ms)Closed at 1733705171252 2024-12-09T00:46:11,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741825_1011 (size=32683) 2024-12-09T00:46:11,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36209 is added to blk_1073741825_1011 (size=32683) 2024-12-09T00:46:11,256 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741825_1011 (size=32683) 2024-12-09T00:46:11,257 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T00:46:11,257 INFO [M:0;1bae0942de96:46663 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T00:46:11,257 INFO [M:0;1bae0942de96:46663 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46663 2024-12-09T00:46:11,257 INFO [M:0;1bae0942de96:46663 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T00:46:11,380 INFO [M:0;1bae0942de96:46663 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T00:46:11,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:11,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46663-0x100081728f20000, quorum=127.0.0.1:51763, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:11,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@17f8e572{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T00:46:11,419 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11aaae40{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T00:46:11,419 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T00:46:11,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14c1b227{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T00:46:11,420 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28d0ee11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/hadoop.log.dir/,STOPPED} 2024-12-09T00:46:11,423 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T00:46:11,423 WARN [BP-542836953-172.17.0.2-1733705162438 heartbeating to localhost/127.0.0.1:44353 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T00:46:11,424 WARN [BP-542836953-172.17.0.2-1733705162438 heartbeating to localhost/127.0.0.1:44353 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-542836953-172.17.0.2-1733705162438 (Datanode Uuid 1c0c990f-5ffc-4089-90ab-61a1a5e1269a) service to localhost/127.0.0.1:44353 2024-12-09T00:46:11,424 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T00:46:11,425 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/data/data5/current/BP-542836953-172.17.0.2-1733705162438 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T00:46:11,425 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/data/data6/current/BP-542836953-172.17.0.2-1733705162438 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T00:46:11,425 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T00:46:11,427 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@133f1bad{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T00:46:11,427 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39860596{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T00:46:11,427 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T00:46:11,428 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@125705fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T00:46:11,428 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50510811{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/hadoop.log.dir/,STOPPED} 2024-12-09T00:46:11,429 WARN [BP-542836953-172.17.0.2-1733705162438 heartbeating to localhost/127.0.0.1:44353 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T00:46:11,429 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T00:46:11,429 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T00:46:11,429 WARN [BP-542836953-172.17.0.2-1733705162438 heartbeating to localhost/127.0.0.1:44353 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-542836953-172.17.0.2-1733705162438 (Datanode Uuid 5161c013-0cec-4da8-b2fa-50669b93914d) service to localhost/127.0.0.1:44353 2024-12-09T00:46:11,430 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/data/data3/current/BP-542836953-172.17.0.2-1733705162438 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T00:46:11,430 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/data/data4/current/BP-542836953-172.17.0.2-1733705162438 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T00:46:11,430 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T00:46:11,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3990ff75{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T00:46:11,432 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@48731e1b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T00:46:11,432 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T00:46:11,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f02cc61{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T00:46:11,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@179ed6d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/hadoop.log.dir/,STOPPED} 2024-12-09T00:46:11,434 WARN [BP-542836953-172.17.0.2-1733705162438 heartbeating to localhost/127.0.0.1:44353 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T00:46:11,434 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T00:46:11,434 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T00:46:11,434 WARN [BP-542836953-172.17.0.2-1733705162438 heartbeating to localhost/127.0.0.1:44353 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-542836953-172.17.0.2-1733705162438 (Datanode Uuid 68577e05-384d-4a9c-811c-e6eb1d65658e) service to localhost/127.0.0.1:44353 2024-12-09T00:46:11,434 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/data/data1/current/BP-542836953-172.17.0.2-1733705162438 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T00:46:11,434 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/cluster_2206ffd7-7a5c-39b4-24d8-aeab961c1001/data/data2/current/BP-542836953-172.17.0.2-1733705162438 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T00:46:11,435 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T00:46:11,442 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cb83937{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T00:46:11,443 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69b5b273{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T00:46:11,443 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T00:46:11,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3cbd6fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T00:46:11,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b03c34d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/hadoop.log.dir/,STOPPED} 2024-12-09T00:46:11,451 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T00:46:11,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T00:46:11,484 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=89 (was 157), OpenFileDescriptor=439 (was 393) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=136 (was 139), ProcessCount=11 (was 11), AvailableMemoryMB=16226 (was 16526) 2024-12-09T00:46:11,489 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=89, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=136, ProcessCount=11, AvailableMemoryMB=16226 2024-12-09T00:46:11,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T00:46:11,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/hadoop.log.dir so I do NOT create it in target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e 2024-12-09T00:46:11,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd1ddd71-8f9f-7adc-80ee-28e06904eeaf/hadoop.tmp.dir so I do NOT create it in target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e 2024-12-09T00:46:11,490 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9, deleteOnExit=true 2024-12-09T00:46:11,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T00:46:11,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/test.cache.data in system properties and HBase conf 2024-12-09T00:46:11,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T00:46:11,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/hadoop.log.dir in system properties and HBase conf 2024-12-09T00:46:11,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T00:46:11,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T00:46:11,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T00:46:11,491 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T00:46:11,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T00:46:11,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T00:46:11,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T00:46:11,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T00:46:11,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T00:46:11,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T00:46:11,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T00:46:11,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T00:46:11,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T00:46:11,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/nfs.dump.dir in system properties and HBase conf 2024-12-09T00:46:11,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/java.io.tmpdir in system properties and HBase conf 2024-12-09T00:46:11,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T00:46:11,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T00:46:11,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T00:46:11,737 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T00:46:11,741 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T00:46:11,743 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T00:46:11,743 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T00:46:11,743 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T00:46:11,744 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T00:46:11,744 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c686d6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/hadoop.log.dir/,AVAILABLE} 2024-12-09T00:46:11,745 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5574eaf2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T00:46:11,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5609bdf8{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/java.io.tmpdir/jetty-localhost-37359-hadoop-hdfs-3_4_1-tests_jar-_-any-2314048594874947401/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T00:46:11,837 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7cbf28d2{HTTP/1.1, (http/1.1)}{localhost:37359} 2024-12-09T00:46:11,837 INFO [Time-limited test {}] server.Server(415): Started @11031ms 2024-12-09T00:46:12,075 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T00:46:12,078 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T00:46:12,079 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T00:46:12,079 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T00:46:12,079 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T00:46:12,079 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c411bc1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/hadoop.log.dir/,AVAILABLE} 2024-12-09T00:46:12,079 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d63bfae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T00:46:12,168 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@484ab650{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/java.io.tmpdir/jetty-localhost-35133-hadoop-hdfs-3_4_1-tests_jar-_-any-7725535215845703960/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T00:46:12,169 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@1d09d7d5{HTTP/1.1, (http/1.1)}{localhost:35133} 2024-12-09T00:46:12,169 INFO [Time-limited test {}] server.Server(415): Started @11363ms 2024-12-09T00:46:12,170 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T00:46:12,200 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T00:46:12,202 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T00:46:12,203 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T00:46:12,203 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T00:46:12,203 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T00:46:12,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17cd1c9f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/hadoop.log.dir/,AVAILABLE} 2024-12-09T00:46:12,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b2d9a02{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T00:46:12,293 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@569dbdc8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/java.io.tmpdir/jetty-localhost-37457-hadoop-hdfs-3_4_1-tests_jar-_-any-13265114891068536230/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T00:46:12,293 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@18c4cb78{HTTP/1.1, (http/1.1)}{localhost:37457} 2024-12-09T00:46:12,293 INFO [Time-limited test {}] server.Server(415): Started @11487ms 2024-12-09T00:46:12,294 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T00:46:12,326 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T00:46:12,328 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T00:46:12,329 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T00:46:12,329 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T00:46:12,329 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T00:46:12,330 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25de4fb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/hadoop.log.dir/,AVAILABLE} 2024-12-09T00:46:12,330 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@498db938{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T00:46:12,423 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@51eb0253{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/java.io.tmpdir/jetty-localhost-36071-hadoop-hdfs-3_4_1-tests_jar-_-any-17955011375128150355/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T00:46:12,424 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38066021{HTTP/1.1, (http/1.1)}{localhost:36071} 2024-12-09T00:46:12,424 INFO [Time-limited test {}] server.Server(415): Started @11618ms 2024-12-09T00:46:12,425 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T00:46:12,982 WARN [Thread-563 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/data/data1/current/BP-1723240308-172.17.0.2-1733705171515/current, will proceed with Du for space computation calculation, 2024-12-09T00:46:12,983 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/data/data2/current/BP-1723240308-172.17.0.2-1733705171515/current, will proceed with Du for space computation calculation, 2024-12-09T00:46:12,998 WARN [Thread-504 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T00:46:13,000 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67906ae15e04bf93 with lease ID 0x8b8487b957f9a460: Processing first storage report for DS-f662bffa-cb7d-48ef-a787-a690b817d88b from datanode DatanodeRegistration(127.0.0.1:46307, datanodeUuid=065a2436-516a-4302-aee5-870871bb86da, infoPort=34799, infoSecurePort=0, ipcPort=34957, storageInfo=lv=-57;cid=testClusterID;nsid=73814160;c=1733705171515) 2024-12-09T00:46:13,000 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67906ae15e04bf93 with lease ID 0x8b8487b957f9a460: from storage DS-f662bffa-cb7d-48ef-a787-a690b817d88b node DatanodeRegistration(127.0.0.1:46307, datanodeUuid=065a2436-516a-4302-aee5-870871bb86da, infoPort=34799, infoSecurePort=0, ipcPort=34957, storageInfo=lv=-57;cid=testClusterID;nsid=73814160;c=1733705171515), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T00:46:13,000 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67906ae15e04bf93 with lease ID 0x8b8487b957f9a460: Processing first storage report for DS-b7634fa5-0d80-49dc-b85d-b531a3f43885 from datanode DatanodeRegistration(127.0.0.1:46307, datanodeUuid=065a2436-516a-4302-aee5-870871bb86da, infoPort=34799, infoSecurePort=0, ipcPort=34957, storageInfo=lv=-57;cid=testClusterID;nsid=73814160;c=1733705171515) 2024-12-09T00:46:13,000 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67906ae15e04bf93 with lease ID 0x8b8487b957f9a460: from storage DS-b7634fa5-0d80-49dc-b85d-b531a3f43885 node DatanodeRegistration(127.0.0.1:46307, datanodeUuid=065a2436-516a-4302-aee5-870871bb86da, infoPort=34799, infoSecurePort=0, ipcPort=34957, storageInfo=lv=-57;cid=testClusterID;nsid=73814160;c=1733705171515), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T00:46:13,218 WARN [Thread-575 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/data/data3/current/BP-1723240308-172.17.0.2-1733705171515/current, will proceed with Du for space computation calculation, 2024-12-09T00:46:13,219 WARN [Thread-576 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/data/data4/current/BP-1723240308-172.17.0.2-1733705171515/current, will proceed with Du for space computation calculation, 2024-12-09T00:46:13,242 WARN [Thread-527 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T00:46:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf86ce14c82c4d1ce with lease ID 0x8b8487b957f9a461: Processing first storage report for DS-ec881784-e882-4d5e-9105-e5d999a9f29a from datanode DatanodeRegistration(127.0.0.1:33741, datanodeUuid=fcaf4034-57d0-4e71-ae2b-d23be958284f, infoPort=40055, infoSecurePort=0, ipcPort=45733, storageInfo=lv=-57;cid=testClusterID;nsid=73814160;c=1733705171515) 2024-12-09T00:46:13,245 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf86ce14c82c4d1ce with lease ID 0x8b8487b957f9a461: from storage DS-ec881784-e882-4d5e-9105-e5d999a9f29a node DatanodeRegistration(127.0.0.1:33741, datanodeUuid=fcaf4034-57d0-4e71-ae2b-d23be958284f, infoPort=40055, infoSecurePort=0, ipcPort=45733, storageInfo=lv=-57;cid=testClusterID;nsid=73814160;c=1733705171515), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T00:46:13,245 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf86ce14c82c4d1ce with lease ID 0x8b8487b957f9a461: Processing first storage report for DS-b2077e5c-9ec2-417e-a9fe-90c53b95d711 from datanode DatanodeRegistration(127.0.0.1:33741, datanodeUuid=fcaf4034-57d0-4e71-ae2b-d23be958284f, infoPort=40055, infoSecurePort=0, ipcPort=45733, storageInfo=lv=-57;cid=testClusterID;nsid=73814160;c=1733705171515) 2024-12-09T00:46:13,245 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf86ce14c82c4d1ce with lease ID 0x8b8487b957f9a461: from storage DS-b2077e5c-9ec2-417e-a9fe-90c53b95d711 node DatanodeRegistration(127.0.0.1:33741, datanodeUuid=fcaf4034-57d0-4e71-ae2b-d23be958284f, infoPort=40055, infoSecurePort=0, ipcPort=45733, storageInfo=lv=-57;cid=testClusterID;nsid=73814160;c=1733705171515), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T00:46:13,309 WARN [Thread-586 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/data/data5/current/BP-1723240308-172.17.0.2-1733705171515/current, will proceed with Du for space computation calculation, 2024-12-09T00:46:13,310 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/data/data6/current/BP-1723240308-172.17.0.2-1733705171515/current, will proceed with Du for space computation calculation, 2024-12-09T00:46:13,330 WARN [Thread-549 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T00:46:13,333 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbde94a9a730e1075 with lease ID 0x8b8487b957f9a462: Processing first storage report for DS-e0d7fc1f-0b80-4ec3-927b-2aa430149f5a from datanode DatanodeRegistration(127.0.0.1:37429, datanodeUuid=7f24502c-5557-4dd3-bb62-2fccb3b691cd, infoPort=37211, infoSecurePort=0, ipcPort=38483, storageInfo=lv=-57;cid=testClusterID;nsid=73814160;c=1733705171515) 2024-12-09T00:46:13,333 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbde94a9a730e1075 with lease ID 0x8b8487b957f9a462: from storage DS-e0d7fc1f-0b80-4ec3-927b-2aa430149f5a node DatanodeRegistration(127.0.0.1:37429, datanodeUuid=7f24502c-5557-4dd3-bb62-2fccb3b691cd, infoPort=37211, infoSecurePort=0, ipcPort=38483, storageInfo=lv=-57;cid=testClusterID;nsid=73814160;c=1733705171515), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T00:46:13,333 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbde94a9a730e1075 with lease ID 0x8b8487b957f9a462: Processing first storage report for DS-0ce22c5d-7eeb-43c2-926f-525f82034665 from datanode DatanodeRegistration(127.0.0.1:37429, datanodeUuid=7f24502c-5557-4dd3-bb62-2fccb3b691cd, infoPort=37211, infoSecurePort=0, ipcPort=38483, storageInfo=lv=-57;cid=testClusterID;nsid=73814160;c=1733705171515) 2024-12-09T00:46:13,333 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbde94a9a730e1075 with lease ID 0x8b8487b957f9a462: from storage DS-0ce22c5d-7eeb-43c2-926f-525f82034665 node DatanodeRegistration(127.0.0.1:37429, datanodeUuid=7f24502c-5557-4dd3-bb62-2fccb3b691cd, infoPort=37211, infoSecurePort=0, ipcPort=38483, storageInfo=lv=-57;cid=testClusterID;nsid=73814160;c=1733705171515), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T00:46:13,363 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e 2024-12-09T00:46:13,367 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/zookeeper_0, clientPort=63217, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T00:46:13,368 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63217 2024-12-09T00:46:13,368 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:13,370 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:13,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741825_1001 (size=7) 2024-12-09T00:46:13,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741825_1001 (size=7) 2024-12-09T00:46:13,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741825_1001 (size=7) 2024-12-09T00:46:13,385 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140 with version=8 2024-12-09T00:46:13,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44353/user/jenkins/test-data/3928cabf-301a-0f2c-49c9-564d3af68bd3/hbase-staging 2024-12-09T00:46:13,387 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1bae0942de96:0 server-side Connection retries=45 2024-12-09T00:46:13,387 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:13,387 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:13,387 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T00:46:13,387 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:13,387 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T00:46:13,387 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T00:46:13,388 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T00:46:13,388 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38957 2024-12-09T00:46:13,390 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38957 connecting to ZooKeeper ensemble=127.0.0.1:63217 2024-12-09T00:46:13,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:389570x0, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T00:46:13,423 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38957-0x100081748120000 connected 2024-12-09T00:46:13,489 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:13,493 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:13,496 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:13,497 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140, hbase.cluster.distributed=false 2024-12-09T00:46:13,499 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T00:46:13,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38957 2024-12-09T00:46:13,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38957 2024-12-09T00:46:13,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38957 2024-12-09T00:46:13,501 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38957 2024-12-09T00:46:13,501 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38957 2024-12-09T00:46:13,517 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1bae0942de96:0 server-side Connection retries=45 2024-12-09T00:46:13,518 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:13,518 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:13,518 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T00:46:13,518 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:13,518 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T00:46:13,518 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T00:46:13,518 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T00:46:13,519 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36541 2024-12-09T00:46:13,520 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36541 connecting to ZooKeeper ensemble=127.0.0.1:63217 2024-12-09T00:46:13,521 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:13,522 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:13,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:365410x0, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T00:46:13,530 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:365410x0, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:13,530 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36541-0x100081748120001 connected 2024-12-09T00:46:13,530 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T00:46:13,531 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T00:46:13,531 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T00:46:13,532 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T00:46:13,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36541 2024-12-09T00:46:13,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36541 2024-12-09T00:46:13,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36541 2024-12-09T00:46:13,534 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36541 2024-12-09T00:46:13,534 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36541 2024-12-09T00:46:13,549 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1bae0942de96:0 server-side Connection retries=45 2024-12-09T00:46:13,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:13,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:13,549 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T00:46:13,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:13,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T00:46:13,549 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T00:46:13,550 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T00:46:13,550 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32815 2024-12-09T00:46:13,551 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32815 connecting to ZooKeeper ensemble=127.0.0.1:63217 2024-12-09T00:46:13,552 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:13,554 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:13,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:328150x0, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T00:46:13,563 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:13,563 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32815-0x100081748120002 connected 2024-12-09T00:46:13,563 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T00:46:13,564 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T00:46:13,565 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T00:46:13,566 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T00:46:13,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32815 2024-12-09T00:46:13,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32815 2024-12-09T00:46:13,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32815 2024-12-09T00:46:13,567 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32815 2024-12-09T00:46:13,567 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32815 2024-12-09T00:46:13,582 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1bae0942de96:0 server-side Connection retries=45 2024-12-09T00:46:13,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:13,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:13,582 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T00:46:13,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T00:46:13,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T00:46:13,582 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T00:46:13,583 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T00:46:13,583 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33411 2024-12-09T00:46:13,584 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33411 connecting to ZooKeeper ensemble=127.0.0.1:63217 2024-12-09T00:46:13,585 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:13,586 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:13,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:334110x0, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T00:46:13,596 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:13,596 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33411-0x100081748120003 connected 2024-12-09T00:46:13,597 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T00:46:13,598 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T00:46:13,598 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T00:46:13,600 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T00:46:13,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33411 2024-12-09T00:46:13,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33411 2024-12-09T00:46:13,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33411 2024-12-09T00:46:13,602 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33411 2024-12-09T00:46:13,602 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33411 2024-12-09T00:46:13,615 DEBUG [M:0;1bae0942de96:38957 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1bae0942de96:38957 2024-12-09T00:46:13,615 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1bae0942de96,38957,1733705173387 2024-12-09T00:46:13,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:13,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:13,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:13,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:13,621 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1bae0942de96,38957,1733705173387 2024-12-09T00:46:13,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T00:46:13,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T00:46:13,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T00:46:13,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,630 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T00:46:13,631 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1bae0942de96,38957,1733705173387 from backup master directory 2024-12-09T00:46:13,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1bae0942de96,38957,1733705173387 2024-12-09T00:46:13,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:13,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:13,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:13,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T00:46:13,638 WARN [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T00:46:13,638 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1bae0942de96,38957,1733705173387 2024-12-09T00:46:13,644 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/hbase.id] with ID: 423cde5c-15e4-4c00-97ef-b54d75535bdf 2024-12-09T00:46:13,644 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/.tmp/hbase.id 2024-12-09T00:46:13,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741826_1002 (size=42) 2024-12-09T00:46:13,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741826_1002 (size=42) 2024-12-09T00:46:13,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741826_1002 (size=42) 2024-12-09T00:46:13,657 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/.tmp/hbase.id]:[hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/hbase.id] 2024-12-09T00:46:13,671 INFO [master/1bae0942de96:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T00:46:13,672 INFO [master/1bae0942de96:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T00:46:13,673 INFO [master/1bae0942de96:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-09T00:46:13,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741827_1003 (size=196) 2024-12-09T00:46:13,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741827_1003 (size=196) 2024-12-09T00:46:13,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741827_1003 (size=196) 2024-12-09T00:46:13,692 INFO [master/1bae0942de96:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T00:46:13,692 INFO [master/1bae0942de96:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T00:46:13,693 INFO [master/1bae0942de96:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T00:46:13,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is 
added to blk_1073741828_1004 (size=1189) 2024-12-09T00:46:13,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741828_1004 (size=1189) 2024-12-09T00:46:13,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741828_1004 (size=1189) 2024-12-09T00:46:13,707 INFO [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store 2024-12-09T00:46:13,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741829_1005 (size=34) 2024-12-09T00:46:13,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741829_1005 (size=34) 2024-12-09T00:46:13,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741829_1005 (size=34) 2024-12-09T00:46:13,717 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T00:46:13,717 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T00:46:13,718 INFO [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T00:46:13,718 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
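The MasterRegion entry above prints the full descriptor of the internal 'master:store' table (families info, proc, rs and state). As a small illustration only, the builder calls below assemble a descriptor with the same headline attributes using the public HBase client API; this is not the MasterRegion bootstrap code, and attributes the log shows at their defaults are omitted.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
          // 'info': 3 versions, in-memory, ROWCOL bloom, ROW_INDEX_V1 encoding, 8 KB blocks
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build())
          // 'proc', 'rs' and 'state' share the settings the log shows: 1 version, ROW bloom, 64 KB blocks
          .setColumnFamily(family("proc"))
          .setColumnFamily(family("rs"))
          .setColumnFamily(family("state"))
          .build();
      }

      private static ColumnFamilyDescriptor family(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
          .setMaxVersions(1)
          .setBloomFilterType(BloomType.ROW)
          .setBlocksize(64 * 1024)
          .build();
      }
    }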
2024-12-09T00:46:13,718 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T00:46:13,718 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T00:46:13,718 INFO [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T00:46:13,718 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733705173717Disabling compacts and flushes for region at 1733705173717Disabling writes for close at 1733705173718 (+1 ms)Writing region close event to WAL at 1733705173718Closed at 1733705173718 2024-12-09T00:46:13,719 WARN [master/1bae0942de96:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/.initializing 2024-12-09T00:46:13,719 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/WALs/1bae0942de96,38957,1733705173387 2024-12-09T00:46:13,722 INFO [master/1bae0942de96:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1bae0942de96%2C38957%2C1733705173387, suffix=, logDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/WALs/1bae0942de96,38957,1733705173387, archiveDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/oldWALs, maxLogs=10 2024-12-09T00:46:13,723 INFO [master/1bae0942de96:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1bae0942de96%2C38957%2C1733705173387.1733705173722 2024-12-09T00:46:13,732 INFO [master/1bae0942de96:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/WALs/1bae0942de96,38957,1733705173387/1bae0942de96%2C38957%2C1733705173387.1733705173722 2024-12-09T00:46:13,733 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37211:37211),(127.0.0.1/127.0.0.1:40055:40055),(127.0.0.1/127.0.0.1:34799:34799)] 2024-12-09T00:46:13,737 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T00:46:13,737 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T00:46:13,737 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:13,737 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:13,739 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:13,741 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T00:46:13,741 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:13,742 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:13,742 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:13,744 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T00:46:13,744 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:13,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T00:46:13,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:13,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T00:46:13,747 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:13,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T00:46:13,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:13,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T00:46:13,750 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:13,751 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T00:46:13,751 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:13,752 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:13,753 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:13,754 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:13,755 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:13,755 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T00:46:13,757 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T00:46:13,759 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T00:46:13,760 INFO [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66056647, jitterRate=-0.0156792551279068}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T00:46:13,761 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733705173737Initializing all the Stores at 1733705173739 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733705173739Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733705173739Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733705173739Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733705173739Cleaning up temporary data from old regions at 1733705173755 (+16 ms)Region opened successfully at 1733705173761 (+6 ms) 2024-12-09T00:46:13,762 INFO [master/1bae0942de96:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T00:46:13,766 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2dd51fc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1bae0942de96/172.17.0.2:0 2024-12-09T00:46:13,768 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T00:46:13,768 INFO [master/1bae0942de96:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T00:46:13,768 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T00:46:13,768 INFO [master/1bae0942de96:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T00:46:13,769 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T00:46:13,770 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T00:46:13,770 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T00:46:13,773 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
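Two of the entries above size thread pools: the ProcedureExecutor line quotes its default sizing rule ("bigger of cpus/4 or 16"; this run starts 5 core workers, so the value was presumably supplied by the test configuration) with a burst maximum of 50, and the RemoteProcedureDispatcher runs 3 core threads with allowCoreThreadTimeOut=true and a queue capped at 32. HBase manages those workers itself, so the following is only a java.util.concurrent sketch of the same knobs, not the ProcedureExecutor implementation.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class WorkerPoolSketch {
      public static void main(String[] args) {
        int cpus = Runtime.getRuntime().availableProcessors();
        int coreWorkers = Math.max(cpus / 4, 16);        // the "bigger of cpus/4 or 16" rule quoted above
        int burstWorkers = Math.max(coreWorkers, 50);    // burst maximum, never below the core size

        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            coreWorkers, burstWorkers,
            60, TimeUnit.SECONDS,                        // idle extra workers time out and are reclaimed
            new LinkedBlockingQueue<>(32));              // bounded queue, like queueMaxSize=32 above
        pool.allowCoreThreadTimeOut(true);               // mirrors allowCoreThreadTimeOut=true
        pool.execute(() -> System.out.println("worker ran"));
        pool.shutdown();
      }
    }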
2024-12-09T00:46:13,774 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T00:46:13,796 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T00:46:13,797 INFO [master/1bae0942de96:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T00:46:13,798 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T00:46:13,804 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T00:46:13,805 INFO [master/1bae0942de96:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T00:46:13,806 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T00:46:13,812 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T00:46:13,814 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T00:46:13,821 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T00:46:13,824 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T00:46:13,829 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T00:46:13,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:13,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:13,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:13,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-09T00:46:13,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,838 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1bae0942de96,38957,1733705173387, sessionid=0x100081748120000, setting cluster-up flag (Was=false) 2024-12-09T00:46:13,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,879 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T00:46:13,880 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1bae0942de96,38957,1733705173387 2024-12-09T00:46:13,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:13,921 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T00:46:13,924 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1bae0942de96,38957,1733705173387 2024-12-09T00:46:13,928 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T00:46:13,932 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T00:46:13,933 INFO [master/1bae0942de96:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T00:46:13,933 INFO [master/1bae0942de96:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
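The StochasticLoadBalancer entry above lists its cost functions and notes that the sum of their multipliers is 0.0 at this point. As a schematic only (not the balancer's actual scoring code), a weighted multi-objective cost of that shape can be written as below; the weights in main are illustrative, not HBase's defaults.

    public class WeightedCostSketch {
      // cost[i] is the i-th cost function's score for a candidate layout, multiplier[i] its weight.
      // Functions weighted 0 are effectively disabled, so a configuration whose multipliers
      // sum to 0.0 cannot distinguish any two layouts.
      static double totalCost(double[] multiplier, double[] cost) {
        double total = 0;
        for (int i = 0; i < multiplier.length; i++) {
          total += multiplier[i] * cost[i];
        }
        return total;
      }

      public static void main(String[] args) {
        double[] weights = { 100, 100, 5, 25 };   // illustrative weights only
        double[] scores  = { 0.10, 0.00, 0.30, 0.20 };
        System.out.println(totalCost(weights, scores));
      }
    }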
2024-12-09T00:46:13,933 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1bae0942de96,38957,1733705173387 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T00:46:13,936 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1bae0942de96:0, corePoolSize=5, maxPoolSize=5 2024-12-09T00:46:13,936 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1bae0942de96:0, corePoolSize=5, maxPoolSize=5 2024-12-09T00:46:13,936 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1bae0942de96:0, corePoolSize=5, maxPoolSize=5 2024-12-09T00:46:13,936 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1bae0942de96:0, corePoolSize=5, maxPoolSize=5 2024-12-09T00:46:13,936 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1bae0942de96:0, corePoolSize=10, maxPoolSize=10 2024-12-09T00:46:13,937 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:13,937 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1bae0942de96:0, corePoolSize=2, maxPoolSize=2 2024-12-09T00:46:13,937 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:13,938 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733705203938 2024-12-09T00:46:13,939 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T00:46:13,939 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T00:46:13,939 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T00:46:13,939 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T00:46:13,939 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T00:46:13,939 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T00:46:13,939 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:13,940 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T00:46:13,940 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T00:46:13,940 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T00:46:13,940 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T00:46:13,941 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T00:46:13,941 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T00:46:13,941 INFO [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T00:46:13,941 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:13,942 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T00:46:13,942 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1bae0942de96:0:becomeActiveMaster-HFileCleaner.large.0-1733705173942,5,FailOnTimeoutGroup] 2024-12-09T00:46:13,942 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/1bae0942de96:0:becomeActiveMaster-HFileCleaner.small.0-1733705173942,5,FailOnTimeoutGroup] 2024-12-09T00:46:13,942 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:13,942 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T00:46:13,942 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:13,943 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:13,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741831_1007 (size=1321) 2024-12-09T00:46:13,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741831_1007 (size=1321) 2024-12-09T00:46:13,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741831_1007 (size=1321) 2024-12-09T00:46:13,957 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T00:46:13,957 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140 2024-12-09T00:46:13,966 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741832_1008 (size=32) 2024-12-09T00:46:13,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741832_1008 (size=32) 2024-12-09T00:46:13,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741832_1008 (size=32) 2024-12-09T00:46:13,967 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T00:46:13,968 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T00:46:13,969 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T00:46:13,970 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:13,970 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:13,970 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T00:46:13,972 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T00:46:13,972 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
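The CompactionConfiguration entries repeated above carry the selection knobs: ratio 1.2, minFilesToCompact 3, maxFilesToCompact 10, and so on. The sketch below is a deliberately simplified version of the classic ratio rule those knobs feed (a large old file is left alone when it dwarfs the combined size of the newer candidates); it is not the ExploringCompactionPolicy named in the log.

    import java.util.ArrayList;
    import java.util.List;

    public class RatioSelectionSketch {
      // Simplified ratio check: starting from the oldest candidate, skip files that are
      // out of proportion with the files that would be compacted alongside them.
      // Roughly: keep file i only if size[i] <= ratio * sum(size[i+1..n-1]).
      static List<Long> select(List<Long> sizesOldestFirst, double ratio, int minFiles, int maxFiles) {
        List<Long> candidates = new ArrayList<>(sizesOldestFirst);
        int start = 0;
        while (start < candidates.size()) {
          long rest = 0;
          for (int j = start + 1; j < candidates.size(); j++) {
            rest += candidates.get(j);
          }
          if (candidates.get(start) <= ratio * rest) {
            break;      // this file is in proportion with the rest; stop skipping
          }
          start++;      // otherwise leave the big old file alone and look at newer ones
        }
        List<Long> picked = candidates.subList(start, Math.min(candidates.size(), start + maxFiles));
        return picked.size() >= minFiles ? new ArrayList<>(picked) : List.of();
      }

      public static void main(String[] args) {
        // With ratio 1.2 the 900 (MB) file is skipped; the three small files qualify (>= minFilesToCompact 3).
        System.out.println(select(List.of(900L, 40L, 30L, 20L), 1.2, 3, 10));
      }
    }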
2024-12-09T00:46:13,973 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:13,973 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T00:46:13,974 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T00:46:13,974 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:13,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:13,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T00:46:13,976 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T00:46:13,977 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:13,977 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:13,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T00:46:13,978 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740 2024-12-09T00:46:13,979 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740 2024-12-09T00:46:13,980 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T00:46:13,980 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T00:46:13,981 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T00:46:13,982 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T00:46:13,984 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T00:46:13,985 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73688146, jitterRate=0.0980389416217804}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T00:46:13,985 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733705173967Initializing all the Stores at 1733705173968 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733705173968Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733705173968Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733705173968Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733705173968Cleaning up temporary data from old regions at 1733705173980 (+12 ms)Region opened successfully at 1733705173985 (+5 ms) 2024-12-09T00:46:13,986 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
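The two "Opened ... next sequenceid=2" entries (for master:store earlier and for hbase:meta here) each print a ConstantSizeRegionSplitPolicy with a desiredMaxFileSize and a jitterRate. The figures are consistent with a 64 MB base file size (67108864 bytes, presumably set by the test configuration) adjusted by the jitter, i.e. desiredMaxFileSize ≈ maxFileSize * (1 + jitterRate). The snippet below simply replays that arithmetic with the numbers from this log; HBase applies the jitter internally, so treat it as a back-of-the-envelope check rather than the split-policy code.

    public class SplitSizeJitterSketch {
      public static void main(String[] args) {
        long maxFileSize = 64L * 1024 * 1024;   // 67108864; base value inferred from the log's numbers

        // jitterRate values reported above for master:store and hbase:meta respectively.
        double[] jitterRates = { -0.0156792551279068, 0.0980389416217804 };

        for (double jitter : jitterRates) {
          long desired = (long) (maxFileSize * (1 + jitter));
          // ~66056647 and ~73688146: the desiredMaxFileSize values in the log, up to rounding.
          System.out.println("jitterRate=" + jitter + " -> desiredMaxFileSize~=" + desired);
        }
      }
    }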
2024-12-09T00:46:13,986 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T00:46:13,986 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T00:46:13,986 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T00:46:13,986 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T00:46:13,986 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T00:46:13,986 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733705173986Disabling compacts and flushes for region at 1733705173986Disabling writes for close at 1733705173986Writing region close event to WAL at 1733705173986Closed at 1733705173986 2024-12-09T00:46:13,988 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T00:46:13,988 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T00:46:13,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T00:46:13,990 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T00:46:13,991 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T00:46:14,005 INFO [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(746): ClusterId : 423cde5c-15e4-4c00-97ef-b54d75535bdf 2024-12-09T00:46:14,005 INFO [RS:2;1bae0942de96:33411 {}] regionserver.HRegionServer(746): ClusterId : 423cde5c-15e4-4c00-97ef-b54d75535bdf 2024-12-09T00:46:14,005 INFO [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(746): ClusterId : 423cde5c-15e4-4c00-97ef-b54d75535bdf 2024-12-09T00:46:14,005 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T00:46:14,005 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T00:46:14,005 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T00:46:14,038 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T00:46:14,038 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T00:46:14,038 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T00:46:14,038 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 
2024-12-09T00:46:14,039 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T00:46:14,039 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T00:46:14,054 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T00:46:14,055 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T00:46:14,055 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T00:46:14,055 DEBUG [RS:1;1bae0942de96:32815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b5c6999, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1bae0942de96/172.17.0.2:0 2024-12-09T00:46:14,055 DEBUG [RS:2;1bae0942de96:33411 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c5268f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1bae0942de96/172.17.0.2:0 2024-12-09T00:46:14,055 DEBUG [RS:0;1bae0942de96:36541 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d8a2900, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1bae0942de96/172.17.0.2:0 2024-12-09T00:46:14,067 DEBUG [RS:0;1bae0942de96:36541 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1bae0942de96:36541 2024-12-09T00:46:14,067 INFO [RS:0;1bae0942de96:36541 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T00:46:14,067 INFO [RS:0;1bae0942de96:36541 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T00:46:14,067 DEBUG [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T00:46:14,068 INFO [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(2659): reportForDuty to master=1bae0942de96,38957,1733705173387 with port=36541, startcode=1733705173517 2024-12-09T00:46:14,069 DEBUG [RS:0;1bae0942de96:36541 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T00:46:14,071 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45223, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T00:46:14,072 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38957 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1bae0942de96,36541,1733705173517 2024-12-09T00:46:14,072 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38957 {}] master.ServerManager(517): Registering regionserver=1bae0942de96,36541,1733705173517 2024-12-09T00:46:14,073 DEBUG [RS:1;1bae0942de96:32815 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;1bae0942de96:32815 2024-12-09T00:46:14,073 DEBUG [RS:2;1bae0942de96:33411 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;1bae0942de96:33411 2024-12-09T00:46:14,073 INFO [RS:1;1bae0942de96:32815 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T00:46:14,073 INFO [RS:2;1bae0942de96:33411 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T00:46:14,073 INFO [RS:1;1bae0942de96:32815 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T00:46:14,073 INFO [RS:2;1bae0942de96:33411 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T00:46:14,073 DEBUG [RS:2;1bae0942de96:33411 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T00:46:14,073 DEBUG [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T00:46:14,074 INFO [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(2659): reportForDuty to master=1bae0942de96,38957,1733705173387 with port=32815, startcode=1733705173549 2024-12-09T00:46:14,074 INFO [RS:2;1bae0942de96:33411 {}] regionserver.HRegionServer(2659): reportForDuty to master=1bae0942de96,38957,1733705173387 with port=33411, startcode=1733705173582 2024-12-09T00:46:14,074 DEBUG [RS:1;1bae0942de96:32815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T00:46:14,074 DEBUG [RS:2;1bae0942de96:33411 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T00:46:14,075 DEBUG [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140 2024-12-09T00:46:14,075 DEBUG [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37501 2024-12-09T00:46:14,075 DEBUG [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T00:46:14,076 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42247, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T00:46:14,076 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37987, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T00:46:14,076 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38957 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1bae0942de96,33411,1733705173582 2024-12-09T00:46:14,076 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38957 {}] master.ServerManager(517): Registering regionserver=1bae0942de96,33411,1733705173582 2024-12-09T00:46:14,078 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38957 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1bae0942de96,32815,1733705173549 2024-12-09T00:46:14,078 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38957 {}] master.ServerManager(517): Registering regionserver=1bae0942de96,32815,1733705173549 2024-12-09T00:46:14,078 DEBUG [RS:2;1bae0942de96:33411 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140 2024-12-09T00:46:14,078 DEBUG [RS:2;1bae0942de96:33411 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37501 2024-12-09T00:46:14,079 DEBUG [RS:2;1bae0942de96:33411 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T00:46:14,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T00:46:14,080 DEBUG [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140 2024-12-09T00:46:14,081 DEBUG [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37501 
2024-12-09T00:46:14,081 DEBUG [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T00:46:14,106 DEBUG [RS:0;1bae0942de96:36541 {}] zookeeper.ZKUtil(111): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1bae0942de96,36541,1733705173517 2024-12-09T00:46:14,106 WARN [RS:0;1bae0942de96:36541 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T00:46:14,106 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1bae0942de96,33411,1733705173582] 2024-12-09T00:46:14,106 INFO [RS:0;1bae0942de96:36541 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T00:46:14,106 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1bae0942de96,36541,1733705173517] 2024-12-09T00:46:14,106 DEBUG [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/WALs/1bae0942de96,36541,1733705173517 2024-12-09T00:46:14,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T00:46:14,121 DEBUG [RS:2;1bae0942de96:33411 {}] zookeeper.ZKUtil(111): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1bae0942de96,33411,1733705173582 2024-12-09T00:46:14,121 WARN [RS:2;1bae0942de96:33411 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T00:46:14,122 INFO [RS:2;1bae0942de96:33411 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T00:46:14,122 DEBUG [RS:2;1bae0942de96:33411 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/WALs/1bae0942de96,33411,1733705173582 2024-12-09T00:46:14,122 DEBUG [RS:1;1bae0942de96:32815 {}] zookeeper.ZKUtil(111): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1bae0942de96,32815,1733705173549 2024-12-09T00:46:14,122 WARN [RS:1;1bae0942de96:32815 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T00:46:14,122 INFO [RS:1;1bae0942de96:32815 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T00:46:14,123 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1bae0942de96,32815,1733705173549] 2024-12-09T00:46:14,123 DEBUG [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/WALs/1bae0942de96,32815,1733705173549 2024-12-09T00:46:14,124 INFO [RS:0;1bae0942de96:36541 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T00:46:14,128 INFO [RS:0;1bae0942de96:36541 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T00:46:14,128 INFO [RS:0;1bae0942de96:36541 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T00:46:14,128 INFO [RS:0;1bae0942de96:36541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,129 INFO [RS:2;1bae0942de96:33411 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T00:46:14,129 INFO [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T00:46:14,131 INFO [RS:0;1bae0942de96:36541 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T00:46:14,131 INFO [RS:0;1bae0942de96:36541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T00:46:14,131 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,131 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,132 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,132 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,132 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,132 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1bae0942de96:0, corePoolSize=2, maxPoolSize=2 2024-12-09T00:46:14,132 INFO [RS:1;1bae0942de96:32815 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T00:46:14,132 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,132 INFO [RS:2;1bae0942de96:33411 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T00:46:14,132 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,132 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,132 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,133 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,133 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,133 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1bae0942de96:0, corePoolSize=3, maxPoolSize=3 2024-12-09T00:46:14,133 DEBUG [RS:0;1bae0942de96:36541 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0, corePoolSize=3, maxPoolSize=3 2024-12-09T00:46:14,135 INFO [RS:2;1bae0942de96:33411 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T00:46:14,135 INFO [RS:1;1bae0942de96:32815 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, 
Offheap=false 2024-12-09T00:46:14,135 INFO [RS:2;1bae0942de96:33411 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,135 INFO [RS:0;1bae0942de96:36541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,135 INFO [RS:0;1bae0942de96:36541 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,135 INFO [RS:0;1bae0942de96:36541 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,135 INFO [RS:1;1bae0942de96:32815 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T00:46:14,135 INFO [RS:0;1bae0942de96:36541 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,135 INFO [RS:1;1bae0942de96:32815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,135 INFO [RS:0;1bae0942de96:36541 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,136 INFO [RS:0;1bae0942de96:36541 {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,36541,1733705173517-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T00:46:14,136 INFO [RS:2;1bae0942de96:33411 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T00:46:14,136 INFO [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T00:46:14,137 INFO [RS:2;1bae0942de96:33411 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T00:46:14,137 INFO [RS:1;1bae0942de96:32815 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T00:46:14,137 INFO [RS:2;1bae0942de96:33411 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,137 INFO [RS:1;1bae0942de96:32815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T00:46:14,137 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,137 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,137 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,137 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,137 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,137 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,137 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,137 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1bae0942de96:0, corePoolSize=2, maxPoolSize=2 2024-12-09T00:46:14,138 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1bae0942de96:0, corePoolSize=2, maxPoolSize=2 2024-12-09T00:46:14,138 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1bae0942de96:0, 
corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1bae0942de96:0, corePoolSize=1, maxPoolSize=1 2024-12-09T00:46:14,138 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1bae0942de96:0, corePoolSize=3, maxPoolSize=3 2024-12-09T00:46:14,138 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1bae0942de96:0, corePoolSize=3, maxPoolSize=3 2024-12-09T00:46:14,138 DEBUG [RS:1;1bae0942de96:32815 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0, corePoolSize=3, maxPoolSize=3 2024-12-09T00:46:14,138 DEBUG [RS:2;1bae0942de96:33411 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0, corePoolSize=3, maxPoolSize=3 2024-12-09T00:46:14,139 INFO [RS:2;1bae0942de96:33411 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,139 INFO [RS:1;1bae0942de96:32815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,139 INFO [RS:2;1bae0942de96:33411 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,139 INFO [RS:1;1bae0942de96:32815 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,139 INFO [RS:2;1bae0942de96:33411 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,140 INFO [RS:1;1bae0942de96:32815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,140 INFO [RS:2;1bae0942de96:33411 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,140 INFO [RS:1;1bae0942de96:32815 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-09T00:46:14,140 INFO [RS:2;1bae0942de96:33411 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,140 INFO [RS:1;1bae0942de96:32815 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,140 INFO [RS:2;1bae0942de96:33411 {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,33411,1733705173582-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T00:46:14,140 INFO [RS:1;1bae0942de96:32815 {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,32815,1733705173549-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T00:46:14,141 WARN [1bae0942de96:38957 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T00:46:14,149 INFO [RS:0;1bae0942de96:36541 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T00:46:14,149 INFO [RS:0;1bae0942de96:36541 {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,36541,1733705173517-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,149 INFO [RS:0;1bae0942de96:36541 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,149 INFO [RS:0;1bae0942de96:36541 {}] regionserver.Replication(171): 1bae0942de96,36541,1733705173517 started 2024-12-09T00:46:14,151 INFO [RS:2;1bae0942de96:33411 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T00:46:14,151 INFO [RS:1;1bae0942de96:32815 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T00:46:14,152 INFO [RS:2;1bae0942de96:33411 {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,33411,1733705173582-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,152 INFO [RS:1;1bae0942de96:32815 {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,32815,1733705173549-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,152 INFO [RS:2;1bae0942de96:33411 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,152 INFO [RS:1;1bae0942de96:32815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,152 INFO [RS:2;1bae0942de96:33411 {}] regionserver.Replication(171): 1bae0942de96,33411,1733705173582 started 2024-12-09T00:46:14,152 INFO [RS:1;1bae0942de96:32815 {}] regionserver.Replication(171): 1bae0942de96,32815,1733705173549 started 2024-12-09T00:46:14,161 INFO [RS:0;1bae0942de96:36541 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T00:46:14,161 INFO [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(1482): Serving as 1bae0942de96,36541,1733705173517, RpcServer on 1bae0942de96/172.17.0.2:36541, sessionid=0x100081748120001 2024-12-09T00:46:14,161 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T00:46:14,161 DEBUG [RS:0;1bae0942de96:36541 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1bae0942de96,36541,1733705173517 2024-12-09T00:46:14,161 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1bae0942de96,36541,1733705173517' 2024-12-09T00:46:14,161 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T00:46:14,162 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T00:46:14,162 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T00:46:14,162 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T00:46:14,162 DEBUG [RS:0;1bae0942de96:36541 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1bae0942de96,36541,1733705173517 2024-12-09T00:46:14,162 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1bae0942de96,36541,1733705173517' 2024-12-09T00:46:14,162 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T00:46:14,162 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T00:46:14,163 DEBUG [RS:0;1bae0942de96:36541 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T00:46:14,163 INFO [RS:0;1bae0942de96:36541 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T00:46:14,163 INFO [RS:0;1bae0942de96:36541 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T00:46:14,163 INFO [RS:1;1bae0942de96:32815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T00:46:14,163 INFO [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(1482): Serving as 1bae0942de96,32815,1733705173549, RpcServer on 1bae0942de96/172.17.0.2:32815, sessionid=0x100081748120002 2024-12-09T00:46:14,163 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T00:46:14,163 DEBUG [RS:1;1bae0942de96:32815 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1bae0942de96,32815,1733705173549 2024-12-09T00:46:14,163 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1bae0942de96,32815,1733705173549' 2024-12-09T00:46:14,163 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T00:46:14,164 INFO [RS:2;1bae0942de96:33411 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,164 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T00:46:14,164 INFO [RS:2;1bae0942de96:33411 {}] regionserver.HRegionServer(1482): Serving as 1bae0942de96,33411,1733705173582, RpcServer on 1bae0942de96/172.17.0.2:33411, sessionid=0x100081748120003 2024-12-09T00:46:14,164 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T00:46:14,164 DEBUG [RS:2;1bae0942de96:33411 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1bae0942de96,33411,1733705173582 2024-12-09T00:46:14,164 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1bae0942de96,33411,1733705173582' 2024-12-09T00:46:14,164 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T00:46:14,164 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T00:46:14,164 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T00:46:14,164 DEBUG [RS:1;1bae0942de96:32815 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1bae0942de96,32815,1733705173549 2024-12-09T00:46:14,164 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1bae0942de96,32815,1733705173549' 2024-12-09T00:46:14,165 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T00:46:14,165 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T00:46:14,165 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T00:46:14,165 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T00:46:14,165 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T00:46:14,165 DEBUG [RS:2;1bae0942de96:33411 {}] 
snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1bae0942de96,33411,1733705173582 2024-12-09T00:46:14,165 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1bae0942de96,33411,1733705173582' 2024-12-09T00:46:14,165 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T00:46:14,165 DEBUG [RS:1;1bae0942de96:32815 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T00:46:14,165 INFO [RS:1;1bae0942de96:32815 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T00:46:14,165 INFO [RS:1;1bae0942de96:32815 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T00:46:14,165 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T00:46:14,166 DEBUG [RS:2;1bae0942de96:33411 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T00:46:14,166 INFO [RS:2;1bae0942de96:33411 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T00:46:14,166 INFO [RS:2;1bae0942de96:33411 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T00:46:14,269 INFO [RS:0;1bae0942de96:36541 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1bae0942de96%2C36541%2C1733705173517, suffix=, logDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/WALs/1bae0942de96,36541,1733705173517, archiveDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/oldWALs, maxLogs=32 2024-12-09T00:46:14,271 INFO [RS:1;1bae0942de96:32815 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1bae0942de96%2C32815%2C1733705173549, suffix=, logDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/WALs/1bae0942de96,32815,1733705173549, archiveDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/oldWALs, maxLogs=32 2024-12-09T00:46:14,271 INFO [RS:2;1bae0942de96:33411 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1bae0942de96%2C33411%2C1733705173582, suffix=, logDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/WALs/1bae0942de96,33411,1733705173582, archiveDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/oldWALs, maxLogs=32 2024-12-09T00:46:14,272 INFO [RS:0;1bae0942de96:36541 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1bae0942de96%2C36541%2C1733705173517.1733705174271 2024-12-09T00:46:14,275 INFO [RS:2;1bae0942de96:33411 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1bae0942de96%2C33411%2C1733705173582.1733705174275 2024-12-09T00:46:14,275 INFO [RS:1;1bae0942de96:32815 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1bae0942de96%2C32815%2C1733705173549.1733705174275 2024-12-09T00:46:14,283 INFO [RS:0;1bae0942de96:36541 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/WALs/1bae0942de96,36541,1733705173517/1bae0942de96%2C36541%2C1733705173517.1733705174271 2024-12-09T00:46:14,290 DEBUG [RS:0;1bae0942de96:36541 {}] wal.AbstractFSWAL(1109): 
Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40055:40055),(127.0.0.1/127.0.0.1:34799:34799),(127.0.0.1/127.0.0.1:37211:37211)] 2024-12-09T00:46:14,291 INFO [RS:2;1bae0942de96:33411 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/WALs/1bae0942de96,33411,1733705173582/1bae0942de96%2C33411%2C1733705173582.1733705174275 2024-12-09T00:46:14,291 INFO [RS:1;1bae0942de96:32815 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/WALs/1bae0942de96,32815,1733705173549/1bae0942de96%2C32815%2C1733705173549.1733705174275 2024-12-09T00:46:14,292 DEBUG [RS:2;1bae0942de96:33411 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37211:37211),(127.0.0.1/127.0.0.1:40055:40055),(127.0.0.1/127.0.0.1:34799:34799)] 2024-12-09T00:46:14,292 DEBUG [RS:1;1bae0942de96:32815 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40055:40055),(127.0.0.1/127.0.0.1:37211:37211),(127.0.0.1/127.0.0.1:34799:34799)] 2024-12-09T00:46:14,391 DEBUG [1bae0942de96:38957 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T00:46:14,392 DEBUG [1bae0942de96:38957 {}] balancer.BalancerClusterState(204): Hosts are {1bae0942de96=0} racks are {/default-rack=0} 2024-12-09T00:46:14,399 DEBUG [1bae0942de96:38957 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T00:46:14,399 DEBUG [1bae0942de96:38957 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T00:46:14,399 DEBUG [1bae0942de96:38957 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T00:46:14,399 DEBUG [1bae0942de96:38957 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T00:46:14,399 DEBUG [1bae0942de96:38957 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T00:46:14,399 DEBUG [1bae0942de96:38957 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T00:46:14,399 INFO [1bae0942de96:38957 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T00:46:14,399 INFO [1bae0942de96:38957 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T00:46:14,399 INFO [1bae0942de96:38957 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T00:46:14,399 DEBUG [1bae0942de96:38957 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T00:46:14,400 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1bae0942de96,36541,1733705173517 2024-12-09T00:46:14,402 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1bae0942de96,36541,1733705173517, state=OPENING 2024-12-09T00:46:14,429 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T00:46:14,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:14,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-09T00:46:14,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:14,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:14,439 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:14,439 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:14,439 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:14,439 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T00:46:14,439 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:14,439 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1bae0942de96,36541,1733705173517}] 2024-12-09T00:46:14,453 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T00:46:14,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T00:46:14,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T00:46:14,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T00:46:14,596 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T00:46:14,599 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43507, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T00:46:14,607 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T00:46:14,608 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T00:46:14,611 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1bae0942de96%2C36541%2C1733705173517.meta, suffix=.meta, logDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/WALs/1bae0942de96,36541,1733705173517, archiveDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/oldWALs, maxLogs=32 2024-12-09T00:46:14,612 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1bae0942de96%2C36541%2C1733705173517.meta.1733705174611.meta 2024-12-09T00:46:14,621 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/WALs/1bae0942de96,36541,1733705173517/1bae0942de96%2C36541%2C1733705173517.meta.1733705174611.meta 2024-12-09T00:46:14,625 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34799:34799),(127.0.0.1/127.0.0.1:37211:37211),(127.0.0.1/127.0.0.1:40055:40055)] 2024-12-09T00:46:14,627 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T00:46:14,627 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T00:46:14,627 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T00:46:14,627 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T00:46:14,627 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T00:46:14,627 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T00:46:14,628 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T00:46:14,628 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T00:46:14,630 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T00:46:14,631 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T00:46:14,631 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:14,631 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:14,631 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T00:46:14,632 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T00:46:14,632 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:14,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:14,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T00:46:14,634 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T00:46:14,634 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:14,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T00:46:14,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T00:46:14,636 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T00:46:14,636 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:14,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T00:46:14,637 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T00:46:14,638 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740 2024-12-09T00:46:14,639 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740 2024-12-09T00:46:14,641 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T00:46:14,641 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T00:46:14,641 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T00:46:14,643 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T00:46:14,644 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63286465, jitterRate=-0.05695818364620209}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T00:46:14,644 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T00:46:14,645 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733705174628Writing region info on filesystem at 1733705174628Initializing all the Stores at 1733705174629 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733705174629Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733705174629Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733705174630 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733705174630Cleaning up temporary data from old regions at 1733705174641 (+11 ms)Running coprocessor post-open hooks at 1733705174644 (+3 ms)Region opened successfully at 1733705174645 (+1 ms) 2024-12-09T00:46:14,647 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733705174595 2024-12-09T00:46:14,650 DEBUG [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T00:46:14,651 INFO [RS_OPEN_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T00:46:14,652 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1bae0942de96,36541,1733705173517 2024-12-09T00:46:14,654 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1bae0942de96,36541,1733705173517, state=OPEN 2024-12-09T00:46:14,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T00:46:14,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T00:46:14,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T00:46:14,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T00:46:14,671 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1bae0942de96,36541,1733705173517 2024-12-09T00:46:14,671 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:14,671 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:14,671 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:14,671 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T00:46:14,676 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T00:46:14,676 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1bae0942de96,36541,1733705173517 in 232 msec 2024-12-09T00:46:14,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T00:46:14,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 688 msec 2024-12-09T00:46:14,681 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T00:46:14,682 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T00:46:14,683 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T00:46:14,683 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1bae0942de96,36541,1733705173517, seqNum=-1] 2024-12-09T00:46:14,683 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T00:46:14,685 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50489, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T00:46:14,693 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 761 msec 2024-12-09T00:46:14,693 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733705174693, completionTime=-1 2024-12-09T00:46:14,693 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T00:46:14,693 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
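The entries above show the master publishing the hbase:meta location to the /hbase/meta-region-server znode and every ZKWatcher refreshing its MetaRegionLocationCache. Client code never reads that znode directly; the public locator API resolves the same fact. A minimal sketch, assuming an already-open Connection named `conn` (the class, method and variable names are illustrative, not from this log):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class MetaLocationSketch {
  static void printMetaLocation(Connection conn) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      // Same fact the master just published for hbase:meta;
      // in this run it resolves to 1bae0942de96,36541,1733705173517.
      System.out.println(loc.getServerName());
    }
  }
}
```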
2024-12-09T00:46:14,696 INFO [master/1bae0942de96:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T00:46:14,696 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733705234696 2024-12-09T00:46:14,696 INFO [master/1bae0942de96:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733705294696 2024-12-09T00:46:14,696 INFO [master/1bae0942de96:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-09T00:46:14,696 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,38957,1733705173387-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,696 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,38957,1733705173387-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,697 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,38957,1733705173387-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,697 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1bae0942de96:38957, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,697 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,697 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T00:46:14,701 DEBUG [master/1bae0942de96:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T00:46:14,704 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.066sec 2024-12-09T00:46:14,704 INFO [master/1bae0942de96:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T00:46:14,704 INFO [master/1bae0942de96:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T00:46:14,705 INFO [master/1bae0942de96:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T00:46:14,705 INFO [master/1bae0942de96:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T00:46:14,705 INFO [master/1bae0942de96:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T00:46:14,705 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,38957,1733705173387-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-09T00:46:14,705 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,38957,1733705173387-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T00:46:14,705 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79af53ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T00:46:14,705 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1bae0942de96,38957,-1 for getting cluster id 2024-12-09T00:46:14,706 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T00:46:14,707 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '423cde5c-15e4-4c00-97ef-b54d75535bdf' 2024-12-09T00:46:14,707 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T00:46:14,708 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "423cde5c-15e4-4c00-97ef-b54d75535bdf" 2024-12-09T00:46:14,708 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cf89829, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T00:46:14,708 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1bae0942de96,38957,-1] 2024-12-09T00:46:14,708 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T00:46:14,709 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:14,710 DEBUG [master/1bae0942de96:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T00:46:14,710 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T00:46:14,710 INFO [master/1bae0942de96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1bae0942de96,38957,1733705173387-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
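The Time-limited test entries above trace a fresh client connection being built: the client asks the connection registry for the cluster id ('423cde5c-15e4-4c00-97ef-b54d75535bdf'), creates stubs against the returned endpoints, and then resolves the meta region location on demand. In application code that whole exchange sits behind ConnectionFactory; a minimal sketch using the ZooKeeper endpoint observed in this run (class name and configuration values are illustrative; a real deployment would read them from hbase-site.xml):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class ConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ZooKeeper endpoint seen in this log (quorum 127.0.0.1, client port 63217).
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "63217");
    // Building the connection performs a registry round-trip like the one logged above:
    // the cluster id is fetched first, master and meta locations are resolved lazily.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      System.out.println("connected; tables are reachable via conn.getTable(...)");
    }
  }
}
```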
2024-12-09T00:46:14,710 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44792, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T00:46:14,711 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49ec70d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T00:46:14,711 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T00:46:14,713 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1bae0942de96,36541,1733705173517, seqNum=-1] 2024-12-09T00:46:14,713 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T00:46:14,715 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51056, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T00:46:14,717 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1bae0942de96,38957,1733705173387 2024-12-09T00:46:14,718 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T00:46:14,719 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 1bae0942de96,38957,1733705173387 2024-12-09T00:46:14,719 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@388aac13 2024-12-09T00:46:14,720 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T00:46:14,721 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44808, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T00:46:14,722 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38957 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T00:46:14,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38957 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-09T00:46:14,725 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T00:46:14,726 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:14,726 
INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38957 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-09T00:46:14,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38957 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T00:46:14,727 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T00:46:14,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741837_1013 (size=392) 2024-12-09T00:46:14,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741837_1013 (size=392) 2024-12-09T00:46:14,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741837_1013 (size=392) 2024-12-09T00:46:14,738 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ed06bc65a8248b608e77050fcb7f9d5f, NAME => 'TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140 2024-12-09T00:46:14,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741838_1014 (size=51) 2024-12-09T00:46:14,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741838_1014 (size=51) 2024-12-09T00:46:14,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741838_1014 (size=51) 2024-12-09T00:46:14,748 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T00:46:14,749 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing ed06bc65a8248b608e77050fcb7f9d5f, disabling compactions & flushes 2024-12-09T00:46:14,749 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 2024-12-09T00:46:14,749 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 
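The master entry "Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', VERSIONS => '1', ...}" is the shell-style rendering of the requested table descriptor, and the stored CreateTableProcedure (pid=4) then drives the filesystem layout, the hbase:meta rows and the region assignment seen below. A minimal Java sketch of issuing the same create through the client API, assuming an open Connection `conn`; only the table name, region replication and the single 'cf' family with one version are taken from the log, everything else is left at defaults:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTableSketch {
  static void createTestTable(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)                         // TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                           // VERSIONS => '1'
                  .build())
              .build());
      // createTable blocks while the master runs the CreateTableProcedure; the repeated
      // "Checking to see if procedure is done pid=4" entries are that client-side polling.
    }
  }
}
```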
2024-12-09T00:46:14,749 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. after waiting 0 ms 2024-12-09T00:46:14,749 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 2024-12-09T00:46:14,749 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 2024-12-09T00:46:14,749 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for ed06bc65a8248b608e77050fcb7f9d5f: Waiting for close lock at 1733705174749Disabling compacts and flushes for region at 1733705174749Disabling writes for close at 1733705174749Writing region close event to WAL at 1733705174749Closed at 1733705174749 2024-12-09T00:46:14,751 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T00:46:14,752 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733705174751"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733705174751"}]},"ts":"1733705174751"} 2024-12-09T00:46:14,755 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T00:46:14,757 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T00:46:14,757 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733705174757"}]},"ts":"1733705174757"} 2024-12-09T00:46:14,761 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-09T00:46:14,761 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {1bae0942de96=0} racks are {/default-rack=0} 2024-12-09T00:46:14,762 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T00:46:14,762 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T00:46:14,762 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T00:46:14,762 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T00:46:14,762 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T00:46:14,762 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T00:46:14,762 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T00:46:14,763 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T00:46:14,763 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T00:46:14,763 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T00:46:14,763 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ed06bc65a8248b608e77050fcb7f9d5f, ASSIGN}] 2024-12-09T00:46:14,766 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ed06bc65a8248b608e77050fcb7f9d5f, ASSIGN 2024-12-09T00:46:14,768 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ed06bc65a8248b608e77050fcb7f9d5f, ASSIGN; state=OFFLINE, location=1bae0942de96,32815,1733705173549; forceNewPlan=false, retain=false 2024-12-09T00:46:14,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38957 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T00:46:14,919 INFO [1bae0942de96:38957 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T00:46:14,919 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ed06bc65a8248b608e77050fcb7f9d5f, regionState=OPENING, regionLocation=1bae0942de96,32815,1733705173549 2024-12-09T00:46:14,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ed06bc65a8248b608e77050fcb7f9d5f, ASSIGN because future has completed 2024-12-09T00:46:14,924 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed06bc65a8248b608e77050fcb7f9d5f, server=1bae0942de96,32815,1733705173549}] 2024-12-09T00:46:15,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38957 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T00:46:15,078 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T00:46:15,082 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34487, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T00:46:15,091 INFO [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 
2024-12-09T00:46:15,091 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ed06bc65a8248b608e77050fcb7f9d5f, NAME => 'TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f.', STARTKEY => '', ENDKEY => ''} 2024-12-09T00:46:15,091 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC ed06bc65a8248b608e77050fcb7f9d5f 2024-12-09T00:46:15,091 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T00:46:15,091 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ed06bc65a8248b608e77050fcb7f9d5f 2024-12-09T00:46:15,092 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ed06bc65a8248b608e77050fcb7f9d5f 2024-12-09T00:46:15,093 INFO [StoreOpener-ed06bc65a8248b608e77050fcb7f9d5f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ed06bc65a8248b608e77050fcb7f9d5f 2024-12-09T00:46:15,095 INFO [StoreOpener-ed06bc65a8248b608e77050fcb7f9d5f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed06bc65a8248b608e77050fcb7f9d5f columnFamilyName cf 2024-12-09T00:46:15,095 DEBUG [StoreOpener-ed06bc65a8248b608e77050fcb7f9d5f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T00:46:15,096 INFO [StoreOpener-ed06bc65a8248b608e77050fcb7f9d5f-1 {}] regionserver.HStore(327): Store=ed06bc65a8248b608e77050fcb7f9d5f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T00:46:15,096 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ed06bc65a8248b608e77050fcb7f9d5f 2024-12-09T00:46:15,097 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/default/TestHBaseWalOnEC/ed06bc65a8248b608e77050fcb7f9d5f 2024-12-09T00:46:15,098 DEBUG 
[RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/default/TestHBaseWalOnEC/ed06bc65a8248b608e77050fcb7f9d5f 2024-12-09T00:46:15,098 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ed06bc65a8248b608e77050fcb7f9d5f 2024-12-09T00:46:15,098 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ed06bc65a8248b608e77050fcb7f9d5f 2024-12-09T00:46:15,101 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ed06bc65a8248b608e77050fcb7f9d5f 2024-12-09T00:46:15,104 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/default/TestHBaseWalOnEC/ed06bc65a8248b608e77050fcb7f9d5f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T00:46:15,105 INFO [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ed06bc65a8248b608e77050fcb7f9d5f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60557242, jitterRate=-0.09762677550315857}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T00:46:15,105 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ed06bc65a8248b608e77050fcb7f9d5f 2024-12-09T00:46:15,106 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ed06bc65a8248b608e77050fcb7f9d5f: Running coprocessor pre-open hook at 1733705175092Writing region info on filesystem at 1733705175092Initializing all the Stores at 1733705175093 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733705175093Cleaning up temporary data from old regions at 1733705175098 (+5 ms)Running coprocessor post-open hooks at 1733705175105 (+7 ms)Region opened successfully at 1733705175106 (+1 ms) 2024-12-09T00:46:15,108 INFO [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f., pid=6, masterSystemTime=1733705175077 2024-12-09T00:46:15,112 DEBUG [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 2024-12-09T00:46:15,112 INFO [RS_OPEN_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 
2024-12-09T00:46:15,114 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ed06bc65a8248b608e77050fcb7f9d5f, regionState=OPEN, openSeqNum=2, regionLocation=1bae0942de96,32815,1733705173549 2024-12-09T00:46:15,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed06bc65a8248b608e77050fcb7f9d5f, server=1bae0942de96,32815,1733705173549 because future has completed 2024-12-09T00:46:15,123 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T00:46:15,124 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ed06bc65a8248b608e77050fcb7f9d5f, server=1bae0942de96,32815,1733705173549 in 196 msec 2024-12-09T00:46:15,128 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T00:46:15,128 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ed06bc65a8248b608e77050fcb7f9d5f, ASSIGN in 360 msec 2024-12-09T00:46:15,129 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T00:46:15,130 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733705175129"}]},"ts":"1733705175129"} 2024-12-09T00:46:15,133 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-09T00:46:15,135 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T00:46:15,138 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 413 msec 2024-12-09T00:46:15,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38957 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T00:46:15,359 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T00:46:15,360 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-09T00:46:15,360 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T00:46:15,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-09T00:46:15,367 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T00:46:15,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
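"Waiting until all regions of table TestHBaseWalOnEC get assigned" is HBaseTestingUtil's post-create guard; once it passes, the test writes the single cell whose flush appears below as key row/cf:cq (~32 B of memstore data). A minimal sketch of that write, assuming an open Connection `conn`; the value bytes are illustrative, since only the row, family and qualifier are visible in the log:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutSketch {
  static void writeOneCell(Connection conn) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("TestHBaseWalOnEC"))) {
      // One cell: row "row", family "cf", qualifier "cq". The WAL append for this edit
      // is what a WAL-on-erasure-coding test like this one is exercising.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
    }
  }
}
```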
2024-12-09T00:46:15,373 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f., hostname=1bae0942de96,32815,1733705173549, seqNum=2] 2024-12-09T00:46:15,373 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T00:46:15,376 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49770, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T00:46:15,381 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38957 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-09T00:46:15,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38957 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-09T00:46:15,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38957 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T00:46:15,385 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-09T00:46:15,387 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T00:46:15,387 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T00:46:15,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38957 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T00:46:15,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32815 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T00:46:15,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 
2024-12-09T00:46:15,545 INFO [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing ed06bc65a8248b608e77050fcb7f9d5f 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-09T00:46:15,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/default/TestHBaseWalOnEC/ed06bc65a8248b608e77050fcb7f9d5f/.tmp/cf/b970a253a3ec49ae910f7d6f68805b59 is 36, key is row/cf:cq/1733705175377/Put/seqid=0 2024-12-09T00:46:15,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741839_1015 (size=4787) 2024-12-09T00:46:15,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741839_1015 (size=4787) 2024-12-09T00:46:15,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741839_1015 (size=4787) 2024-12-09T00:46:15,577 INFO [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/default/TestHBaseWalOnEC/ed06bc65a8248b608e77050fcb7f9d5f/.tmp/cf/b970a253a3ec49ae910f7d6f68805b59 2024-12-09T00:46:15,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/default/TestHBaseWalOnEC/ed06bc65a8248b608e77050fcb7f9d5f/.tmp/cf/b970a253a3ec49ae910f7d6f68805b59 as hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/default/TestHBaseWalOnEC/ed06bc65a8248b608e77050fcb7f9d5f/cf/b970a253a3ec49ae910f7d6f68805b59 2024-12-09T00:46:15,596 INFO [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/default/TestHBaseWalOnEC/ed06bc65a8248b608e77050fcb7f9d5f/cf/b970a253a3ec49ae910f7d6f68805b59, entries=1, sequenceid=5, filesize=4.7 K 2024-12-09T00:46:15,597 INFO [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for ed06bc65a8248b608e77050fcb7f9d5f in 53ms, sequenceid=5, compaction requested=false 2024-12-09T00:46:15,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for ed06bc65a8248b608e77050fcb7f9d5f: 2024-12-09T00:46:15,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 
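The flush requested at 00:46:15,381 runs as FlushTableProcedure (pid=7) with one FlushRegionProcedure child (pid=8): the region server writes the 32 B memstore to a temporary HFile under .tmp/cf/, commits it into cf/, and ends up with ~4.7 K on disk at sequenceid=5. From the client this is a single admin call; a minimal sketch, again assuming an open Connection `conn`:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class FlushSketch {
  static void flushTestTable(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      // Master-coordinated flush of every region of the table; the RS_FLUSH_OPERATIONS
      // entries above are the region-server side of this call.
      admin.flush(TableName.valueOf("TestHBaseWalOnEC"));
    }
  }
}
```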
2024-12-09T00:46:15,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1bae0942de96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T00:46:15,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38957 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T00:46:15,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T00:46:15,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 213 msec 2024-12-09T00:46:15,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 224 msec 2024-12-09T00:46:15,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38957 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T00:46:15,699 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T00:46:15,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T00:46:15,705 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T00:46:15,706 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T00:46:15,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:15,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:15,706 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T00:46:15,706 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T00:46:15,707 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1207789188, stopped=false 2024-12-09T00:46:15,707 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1bae0942de96,38957,1733705173387 2024-12-09T00:46:15,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:15,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:15,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:15,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T00:46:15,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:15,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, 
quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:15,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:15,771 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T00:46:15,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:15,771 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T00:46:15,771 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T00:46:15,772 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:15,772 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:15,772 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1bae0942de96,36541,1733705173517' ***** 2024-12-09T00:46:15,772 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T00:46:15,772 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1bae0942de96,32815,1733705173549' ***** 2024-12-09T00:46:15,772 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:15,772 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:15,772 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T00:46:15,772 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T00:46:15,772 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1bae0942de96,33411,1733705173582' ***** 2024-12-09T00:46:15,772 INFO [RS:0;1bae0942de96:36541 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T00:46:15,772 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T00:46:15,772 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T00:46:15,773 INFO [RS:1;1bae0942de96:32815 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T00:46:15,773 INFO [RS:2;1bae0942de96:33411 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T00:46:15,773 INFO [RS:1;1bae0942de96:32815 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-09T00:46:15,773 INFO [RS:2;1bae0942de96:33411 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T00:46:15,773 INFO [RS:2;1bae0942de96:33411 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T00:46:15,773 INFO [RS:0;1bae0942de96:36541 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T00:46:15,773 INFO [RS:2;1bae0942de96:33411 {}] regionserver.HRegionServer(959): stopping server 1bae0942de96,33411,1733705173582 2024-12-09T00:46:15,773 INFO [RS:2;1bae0942de96:33411 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T00:46:15,773 INFO [RS:1;1bae0942de96:32815 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T00:46:15,773 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T00:46:15,773 INFO [RS:0;1bae0942de96:36541 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T00:46:15,773 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T00:46:15,774 INFO [RS:2;1bae0942de96:33411 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;1bae0942de96:33411. 2024-12-09T00:46:15,774 INFO [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(3091): Received CLOSE for ed06bc65a8248b608e77050fcb7f9d5f 2024-12-09T00:46:15,774 INFO [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(959): stopping server 1bae0942de96,36541,1733705173517 2024-12-09T00:46:15,774 INFO [RS:0;1bae0942de96:36541 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T00:46:15,774 DEBUG [RS:2;1bae0942de96:33411 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T00:46:15,774 INFO [RS:0;1bae0942de96:36541 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1bae0942de96:36541. 
2024-12-09T00:46:15,774 DEBUG [RS:2;1bae0942de96:33411 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:15,774 INFO [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(959): stopping server 1bae0942de96,32815,1733705173549 2024-12-09T00:46:15,774 DEBUG [RS:0;1bae0942de96:36541 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T00:46:15,774 INFO [RS:2;1bae0942de96:33411 {}] regionserver.HRegionServer(976): stopping server 1bae0942de96,33411,1733705173582; all regions closed. 2024-12-09T00:46:15,774 INFO [RS:1;1bae0942de96:32815 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T00:46:15,774 DEBUG [RS:0;1bae0942de96:36541 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:15,774 INFO [RS:1;1bae0942de96:32815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;1bae0942de96:32815. 
2024-12-09T00:46:15,775 DEBUG [RS:1;1bae0942de96:32815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T00:46:15,775 INFO [RS:0;1bae0942de96:36541 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T00:46:15,775 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ed06bc65a8248b608e77050fcb7f9d5f, disabling compactions & flushes 2024-12-09T00:46:15,775 INFO [RS:0;1bae0942de96:36541 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T00:46:15,775 DEBUG [RS:1;1bae0942de96:32815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:15,775 INFO [RS:0;1bae0942de96:36541 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T00:46:15,775 INFO [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 2024-12-09T00:46:15,775 INFO [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T00:46:15,775 INFO [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T00:46:15,775 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 2024-12-09T00:46:15,775 DEBUG [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(1325): Online Regions={ed06bc65a8248b608e77050fcb7f9d5f=TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f.} 2024-12-09T00:46:15,775 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. after waiting 0 ms 2024-12-09T00:46:15,775 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 
2024-12-09T00:46:15,775 DEBUG [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(1351): Waiting on ed06bc65a8248b608e77050fcb7f9d5f 2024-12-09T00:46:15,775 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,775 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,775 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,776 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,776 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,779 INFO [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T00:46:15,779 DEBUG [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T00:46:15,779 DEBUG [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T00:46:15,779 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T00:46:15,779 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T00:46:15,779 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T00:46:15,779 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T00:46:15,779 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T00:46:15,780 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-09T00:46:15,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741835_1011 (size=93) 2024-12-09T00:46:15,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741835_1011 (size=93) 2024-12-09T00:46:15,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741835_1011 (size=93) 2024-12-09T00:46:15,785 DEBUG [RS:2;1bae0942de96:33411 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/oldWALs 2024-12-09T00:46:15,785 INFO [RS:2;1bae0942de96:33411 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1bae0942de96%2C33411%2C1733705173582:(num 1733705174275) 2024-12-09T00:46:15,785 DEBUG [RS:2;1bae0942de96:33411 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:15,785 INFO [RS:2;1bae0942de96:33411 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T00:46:15,785 INFO [RS:2;1bae0942de96:33411 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T00:46:15,785 INFO [RS:2;1bae0942de96:33411 {}] hbase.ChoreService(370): Chore service for: regionserver/1bae0942de96:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, 
ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T00:46:15,786 INFO [RS:2;1bae0942de96:33411 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T00:46:15,786 INFO [regionserver/1bae0942de96:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T00:46:15,786 INFO [RS:2;1bae0942de96:33411 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T00:46:15,786 INFO [RS:2;1bae0942de96:33411 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T00:46:15,786 INFO [RS:2;1bae0942de96:33411 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T00:46:15,786 INFO [RS:2;1bae0942de96:33411 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33411 2024-12-09T00:46:15,787 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/default/TestHBaseWalOnEC/ed06bc65a8248b608e77050fcb7f9d5f/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T00:46:15,788 INFO [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 2024-12-09T00:46:15,788 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ed06bc65a8248b608e77050fcb7f9d5f: Waiting for close lock at 1733705175774Running coprocessor pre-close hooks at 1733705175775 (+1 ms)Disabling compacts and flushes for region at 1733705175775Disabling writes for close at 1733705175775Writing region close event to WAL at 1733705175779 (+4 ms)Running coprocessor post-close hooks at 1733705175788 (+9 ms)Closed at 1733705175788 2024-12-09T00:46:15,789 DEBUG [RS_CLOSE_REGION-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f. 
2024-12-09T00:46:15,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T00:46:15,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1bae0942de96,33411,1733705173582 2024-12-09T00:46:15,796 INFO [RS:2;1bae0942de96:33411 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T00:46:15,798 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/.tmp/info/5cb9208a79a341b3b1113cd6298de88c is 153, key is TestHBaseWalOnEC,,1733705174722.ed06bc65a8248b608e77050fcb7f9d5f./info:regioninfo/1733705175113/Put/seqid=0 2024-12-09T00:46:15,804 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1bae0942de96,33411,1733705173582] 2024-12-09T00:46:15,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741840_1016 (size=6637) 2024-12-09T00:46:15,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741840_1016 (size=6637) 2024-12-09T00:46:15,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741840_1016 (size=6637) 2024-12-09T00:46:15,808 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/.tmp/info/5cb9208a79a341b3b1113cd6298de88c 2024-12-09T00:46:15,812 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1bae0942de96,33411,1733705173582 already deleted, retry=false 2024-12-09T00:46:15,812 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1bae0942de96,33411,1733705173582 expired; onlineServers=2 2024-12-09T00:46:15,828 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/.tmp/ns/e4e02236ea4a4ec18dd9ba2bce39e7da is 43, key is default/ns:d/1733705174686/Put/seqid=0 2024-12-09T00:46:15,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741841_1017 (size=5153) 2024-12-09T00:46:15,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741841_1017 (size=5153) 2024-12-09T00:46:15,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741841_1017 (size=5153) 2024-12-09T00:46:15,835 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/.tmp/ns/e4e02236ea4a4ec18dd9ba2bce39e7da 2024-12-09T00:46:15,838 INFO [regionserver/1bae0942de96:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T00:46:15,842 INFO [regionserver/1bae0942de96:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T00:46:15,842 INFO [regionserver/1bae0942de96:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T00:46:15,858 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/.tmp/table/9ce37b37580d4501a171e18e3931d4b2 is 52, key is TestHBaseWalOnEC/table:state/1733705175129/Put/seqid=0 2024-12-09T00:46:15,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741842_1018 (size=5249) 2024-12-09T00:46:15,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741842_1018 (size=5249) 2024-12-09T00:46:15,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741842_1018 (size=5249) 2024-12-09T00:46:15,865 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/.tmp/table/9ce37b37580d4501a171e18e3931d4b2 2024-12-09T00:46:15,873 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/.tmp/info/5cb9208a79a341b3b1113cd6298de88c as hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/info/5cb9208a79a341b3b1113cd6298de88c 2024-12-09T00:46:15,881 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/info/5cb9208a79a341b3b1113cd6298de88c, entries=10, sequenceid=11, filesize=6.5 K 2024-12-09T00:46:15,883 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/.tmp/ns/e4e02236ea4a4ec18dd9ba2bce39e7da as hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/ns/e4e02236ea4a4ec18dd9ba2bce39e7da 2024-12-09T00:46:15,890 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/ns/e4e02236ea4a4ec18dd9ba2bce39e7da, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T00:46:15,891 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/.tmp/table/9ce37b37580d4501a171e18e3931d4b2 as hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/table/9ce37b37580d4501a171e18e3931d4b2 2024-12-09T00:46:15,898 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/table/9ce37b37580d4501a171e18e3931d4b2, entries=2, sequenceid=11, filesize=5.1 K 2024-12-09T00:46:15,899 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 120ms, sequenceid=11, compaction requested=false 2024-12-09T00:46:15,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:15,904 INFO [RS:2;1bae0942de96:33411 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T00:46:15,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33411-0x100081748120003, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:15,904 INFO [RS:2;1bae0942de96:33411 {}] regionserver.HRegionServer(1031): Exiting; stopping=1bae0942de96,33411,1733705173582; zookeeper connection closed. 2024-12-09T00:46:15,904 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7dbfb8ba {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7dbfb8ba 2024-12-09T00:46:15,905 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T00:46:15,905 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T00:46:15,905 INFO [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T00:46:15,906 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733705175779Running coprocessor pre-close hooks at 1733705175779Disabling compacts and flushes for region at 1733705175779Disabling writes for close at 1733705175779Obtaining lock to block concurrent updates at 1733705175780 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733705175780Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733705175780Flushing stores of hbase:meta,,1.1588230740 at 1733705175781 (+1 ms)Flushing 1588230740/info: creating writer at 1733705175781Flushing 1588230740/info: appending metadata at 1733705175797 (+16 ms)Flushing 1588230740/info: closing flushed 
file at 1733705175798 (+1 ms)Flushing 1588230740/ns: creating writer at 1733705175815 (+17 ms)Flushing 1588230740/ns: appending metadata at 1733705175828 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733705175828Flushing 1588230740/table: creating writer at 1733705175843 (+15 ms)Flushing 1588230740/table: appending metadata at 1733705175857 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733705175857Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2154f8ae: reopening flushed file at 1733705175872 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69b6a8a6: reopening flushed file at 1733705175881 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23aeb5a7: reopening flushed file at 1733705175890 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 120ms, sequenceid=11, compaction requested=false at 1733705175899 (+9 ms)Writing region close event to WAL at 1733705175901 (+2 ms)Running coprocessor post-close hooks at 1733705175905 (+4 ms)Closed at 1733705175905 2024-12-09T00:46:15,906 DEBUG [RS_CLOSE_META-regionserver/1bae0942de96:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T00:46:15,975 INFO [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(976): stopping server 1bae0942de96,32815,1733705173549; all regions closed. 2024-12-09T00:46:15,976 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,976 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,976 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,976 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,977 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741834_1010 (size=1298) 2024-12-09T00:46:15,979 INFO [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(976): stopping server 1bae0942de96,36541,1733705173517; all regions closed. 
2024-12-09T00:46:15,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741834_1010 (size=1298) 2024-12-09T00:46:15,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741834_1010 (size=1298) 2024-12-09T00:46:15,980 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,980 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,980 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,980 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,980 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,983 DEBUG [RS:1;1bae0942de96:32815 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/oldWALs 2024-12-09T00:46:15,983 INFO [RS:1;1bae0942de96:32815 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1bae0942de96%2C32815%2C1733705173549:(num 1733705174275) 2024-12-09T00:46:15,983 DEBUG [RS:1;1bae0942de96:32815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:15,983 INFO [RS:1;1bae0942de96:32815 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T00:46:15,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741836_1012 (size=2751) 2024-12-09T00:46:15,983 INFO [RS:1;1bae0942de96:32815 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T00:46:15,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741836_1012 (size=2751) 2024-12-09T00:46:15,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741836_1012 (size=2751) 2024-12-09T00:46:15,983 INFO [RS:1;1bae0942de96:32815 {}] hbase.ChoreService(370): Chore service for: regionserver/1bae0942de96:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T00:46:15,983 INFO [RS:1;1bae0942de96:32815 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T00:46:15,983 INFO [regionserver/1bae0942de96:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T00:46:15,984 INFO [RS:1;1bae0942de96:32815 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T00:46:15,984 INFO [RS:1;1bae0942de96:32815 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T00:46:15,984 INFO [RS:1;1bae0942de96:32815 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T00:46:15,984 INFO [RS:1;1bae0942de96:32815 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32815 2024-12-09T00:46:15,986 DEBUG [RS:0;1bae0942de96:36541 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/oldWALs 2024-12-09T00:46:15,986 INFO [RS:0;1bae0942de96:36541 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1bae0942de96%2C36541%2C1733705173517.meta:.meta(num 1733705174611) 2024-12-09T00:46:15,987 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,987 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,987 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,987 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,988 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:15,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741833_1009 (size=93) 2024-12-09T00:46:15,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741833_1009 (size=93) 2024-12-09T00:46:15,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741833_1009 (size=93) 2024-12-09T00:46:15,993 DEBUG [RS:0;1bae0942de96:36541 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/oldWALs 2024-12-09T00:46:15,993 INFO [RS:0;1bae0942de96:36541 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1bae0942de96%2C36541%2C1733705173517:(num 1733705174271) 2024-12-09T00:46:15,993 DEBUG [RS:0;1bae0942de96:36541 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T00:46:15,993 INFO [RS:0;1bae0942de96:36541 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T00:46:15,993 INFO [RS:0;1bae0942de96:36541 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T00:46:15,993 INFO [RS:0;1bae0942de96:36541 {}] hbase.ChoreService(370): Chore service for: regionserver/1bae0942de96:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T00:46:15,993 INFO [RS:0;1bae0942de96:36541 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T00:46:15,993 INFO [regionserver/1bae0942de96:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T00:46:15,993 INFO [RS:0;1bae0942de96:36541 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36541 2024-12-09T00:46:16,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1bae0942de96,32815,1733705173549 2024-12-09T00:46:16,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T00:46:16,012 INFO [RS:1;1bae0942de96:32815 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T00:46:16,013 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007febac8f40f0@450b75bb rejected from java.util.concurrent.ThreadPoolExecutor@9f37e[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-09T00:46:16,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1bae0942de96,36541,1733705173517 2024-12-09T00:46:16,021 INFO [RS:0;1bae0942de96:36541 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T00:46:16,021 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007febac8f40f0@22b50163 rejected from java.util.concurrent.ThreadPoolExecutor@268a91ed[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-09T00:46:16,029 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1bae0942de96,32815,1733705173549] 2024-12-09T00:46:16,045 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1bae0942de96,32815,1733705173549 already deleted, retry=false 2024-12-09T00:46:16,046 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1bae0942de96,32815,1733705173549 expired; onlineServers=1 2024-12-09T00:46:16,046 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1bae0942de96,36541,1733705173517] 2024-12-09T00:46:16,054 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1bae0942de96,36541,1733705173517 already deleted, retry=false 2024-12-09T00:46:16,054 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1bae0942de96,36541,1733705173517 expired; onlineServers=0 2024-12-09T00:46:16,054 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1bae0942de96,38957,1733705173387' ***** 2024-12-09T00:46:16,054 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T00:46:16,054 INFO [M:0;1bae0942de96:38957 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T00:46:16,054 INFO [M:0;1bae0942de96:38957 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T00:46:16,054 DEBUG [M:0;1bae0942de96:38957 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T00:46:16,055 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T00:46:16,055 DEBUG [M:0;1bae0942de96:38957 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T00:46:16,055 DEBUG [master/1bae0942de96:0:becomeActiveMaster-HFileCleaner.large.0-1733705173942 {}] cleaner.HFileCleaner(306): Exit Thread[master/1bae0942de96:0:becomeActiveMaster-HFileCleaner.large.0-1733705173942,5,FailOnTimeoutGroup] 2024-12-09T00:46:16,055 DEBUG [master/1bae0942de96:0:becomeActiveMaster-HFileCleaner.small.0-1733705173942 {}] cleaner.HFileCleaner(306): Exit Thread[master/1bae0942de96:0:becomeActiveMaster-HFileCleaner.small.0-1733705173942,5,FailOnTimeoutGroup] 2024-12-09T00:46:16,055 INFO [M:0;1bae0942de96:38957 {}] hbase.ChoreService(370): Chore service for: master/1bae0942de96:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T00:46:16,055 INFO [M:0;1bae0942de96:38957 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T00:46:16,055 DEBUG [M:0;1bae0942de96:38957 {}] master.HMaster(1795): Stopping service threads 2024-12-09T00:46:16,055 INFO [M:0;1bae0942de96:38957 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T00:46:16,055 INFO [M:0;1bae0942de96:38957 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T00:46:16,055 INFO [M:0;1bae0942de96:38957 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T00:46:16,056 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T00:46:16,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T00:46:16,062 DEBUG [M:0;1bae0942de96:38957 {}] zookeeper.ZKUtil(347): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T00:46:16,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T00:46:16,062 WARN [M:0;1bae0942de96:38957 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T00:46:16,063 INFO [M:0;1bae0942de96:38957 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/.lastflushedseqids 2024-12-09T00:46:16,066 WARN [IPC Server handler 0 on default port 37501 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T00:46:16,066 WARN [IPC Server handler 0 on default port 37501 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], 
creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T00:46:16,066 WARN [IPC Server handler 0 on default port 37501 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T00:46:16,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741843_1019 (size=127) 2024-12-09T00:46:16,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741843_1019 (size=127) 2024-12-09T00:46:16,073 INFO [M:0;1bae0942de96:38957 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T00:46:16,073 INFO [M:0;1bae0942de96:38957 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T00:46:16,073 DEBUG [M:0;1bae0942de96:38957 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T00:46:16,073 INFO [M:0;1bae0942de96:38957 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T00:46:16,073 DEBUG [M:0;1bae0942de96:38957 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T00:46:16,073 DEBUG [M:0;1bae0942de96:38957 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T00:46:16,073 DEBUG [M:0;1bae0942de96:38957 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T00:46:16,073 INFO [M:0;1bae0942de96:38957 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-09T00:46:16,090 DEBUG [M:0;1bae0942de96:38957 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d5b08a614b2c410286b7765b16286ddd is 82, key is hbase:meta,,1/info:regioninfo/1733705174651/Put/seqid=0 2024-12-09T00:46:16,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741844_1020 (size=5672) 2024-12-09T00:46:16,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741844_1020 (size=5672) 2024-12-09T00:46:16,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741844_1020 (size=5672) 2024-12-09T00:46:16,099 INFO [M:0;1bae0942de96:38957 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d5b08a614b2c410286b7765b16286ddd 2024-12-09T00:46:16,120 DEBUG [M:0;1bae0942de96:38957 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9a4a2e91678148d594e86db82e81cfff is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733705175137/Put/seqid=0 2024-12-09T00:46:16,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741845_1021 (size=6440) 2024-12-09T00:46:16,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741845_1021 (size=6440) 2024-12-09T00:46:16,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741845_1021 (size=6440) 2024-12-09T00:46:16,127 INFO [M:0;1bae0942de96:38957 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9a4a2e91678148d594e86db82e81cfff 2024-12-09T00:46:16,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:16,129 INFO [RS:1;1bae0942de96:32815 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T00:46:16,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32815-0x100081748120002, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:16,129 INFO [RS:1;1bae0942de96:32815 {}] regionserver.HRegionServer(1031): Exiting; stopping=1bae0942de96,32815,1733705173549; zookeeper connection closed. 
2024-12-09T00:46:16,129 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@bad8b89 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@bad8b89 2024-12-09T00:46:16,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:16,138 INFO [RS:0;1bae0942de96:36541 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T00:46:16,138 INFO [RS:0;1bae0942de96:36541 {}] regionserver.HRegionServer(1031): Exiting; stopping=1bae0942de96,36541,1733705173517; zookeeper connection closed. 2024-12-09T00:46:16,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36541-0x100081748120001, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:16,138 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@435fae63 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@435fae63 2024-12-09T00:46:16,138 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T00:46:16,150 DEBUG [M:0;1bae0942de96:38957 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f740ec0c127d490a92fbd276c7e6e7d9 is 69, key is 1bae0942de96,32815,1733705173549/rs:state/1733705174079/Put/seqid=0 2024-12-09T00:46:16,151 WARN [IPC Server handler 4 on default port 37501 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T00:46:16,152 WARN [IPC Server handler 4 on default port 37501 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T00:46:16,152 WARN [IPC Server handler 4 on default port 37501 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T00:46:16,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741846_1022 (size=5294) 2024-12-09T00:46:16,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741846_1022 (size=5294) 2024-12-09T00:46:16,156 INFO [M:0;1bae0942de96:38957 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f740ec0c127d490a92fbd276c7e6e7d9 2024-12-09T00:46:16,163 DEBUG [M:0;1bae0942de96:38957 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d5b08a614b2c410286b7765b16286ddd as hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d5b08a614b2c410286b7765b16286ddd 2024-12-09T00:46:16,169 INFO [M:0;1bae0942de96:38957 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d5b08a614b2c410286b7765b16286ddd, entries=8, sequenceid=72, filesize=5.5 K 2024-12-09T00:46:16,170 DEBUG [M:0;1bae0942de96:38957 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9a4a2e91678148d594e86db82e81cfff as hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9a4a2e91678148d594e86db82e81cfff 2024-12-09T00:46:16,176 INFO [M:0;1bae0942de96:38957 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9a4a2e91678148d594e86db82e81cfff, entries=8, sequenceid=72, filesize=6.3 K 2024-12-09T00:46:16,177 DEBUG [M:0;1bae0942de96:38957 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f740ec0c127d490a92fbd276c7e6e7d9 as hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f740ec0c127d490a92fbd276c7e6e7d9 2024-12-09T00:46:16,183 INFO [M:0;1bae0942de96:38957 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37501/user/jenkins/test-data/e75fb8a5-12ed-cfb0-e8ca-8c0181dda140/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f740ec0c127d490a92fbd276c7e6e7d9, entries=3, sequenceid=72, filesize=5.2 K 2024-12-09T00:46:16,185 INFO [M:0;1bae0942de96:38957 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=72, compaction requested=false 2024-12-09T00:46:16,186 INFO [M:0;1bae0942de96:38957 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T00:46:16,187 DEBUG [M:0;1bae0942de96:38957 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733705176073Disabling compacts and flushes for region at 1733705176073Disabling writes for close at 1733705176073Obtaining lock to block concurrent updates at 1733705176073Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733705176073Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733705176074 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733705176075 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733705176075Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733705176089 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733705176089Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733705176106 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733705176120 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733705176120Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733705176134 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733705176150 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733705176150Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23c298cb: reopening flushed file at 1733705176162 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41e1a708: reopening flushed file at 1733705176169 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57ee7da8: reopening flushed file at 1733705176177 (+8 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=72, compaction requested=false at 1733705176185 (+8 ms)Writing region close event to WAL at 1733705176186 (+1 ms)Closed at 1733705176186 2024-12-09T00:46:16,187 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:16,187 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:16,187 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:16,187 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:16,187 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T00:46:16,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46307 is added to blk_1073741830_1006 (size=32686) 2024-12-09T00:46:16,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33741 is added to blk_1073741830_1006 (size=32686) 2024-12-09T00:46:16,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37429 is added to blk_1073741830_1006 (size=32686) 2024-12-09T00:46:16,190 INFO [M:0;1bae0942de96:38957 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T00:46:16,190 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T00:46:16,190 INFO [M:0;1bae0942de96:38957 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38957 2024-12-09T00:46:16,191 INFO [M:0;1bae0942de96:38957 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T00:46:16,304 INFO [M:0;1bae0942de96:38957 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T00:46:16,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:16,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38957-0x100081748120000, quorum=127.0.0.1:63217, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T00:46:16,306 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@51eb0253{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T00:46:16,307 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38066021{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T00:46:16,307 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T00:46:16,307 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@498db938{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T00:46:16,307 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25de4fb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/hadoop.log.dir/,STOPPED} 2024-12-09T00:46:16,308 WARN [BP-1723240308-172.17.0.2-1733705171515 heartbeating to localhost/127.0.0.1:37501 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T00:46:16,308 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T00:46:16,308 WARN [BP-1723240308-172.17.0.2-1733705171515 heartbeating to localhost/127.0.0.1:37501 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1723240308-172.17.0.2-1733705171515 (Datanode Uuid 7f24502c-5557-4dd3-bb62-2fccb3b691cd) service to localhost/127.0.0.1:37501 2024-12-09T00:46:16,308 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T00:46:16,309 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/data/data5/current/BP-1723240308-172.17.0.2-1733705171515 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T00:46:16,309 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/data/data6/current/BP-1723240308-172.17.0.2-1733705171515 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T00:46:16,309 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T00:46:16,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@569dbdc8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T00:46:16,312 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@18c4cb78{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T00:46:16,312 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T00:46:16,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b2d9a02{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T00:46:16,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17cd1c9f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/hadoop.log.dir/,STOPPED} 2024-12-09T00:46:16,315 WARN [BP-1723240308-172.17.0.2-1733705171515 heartbeating to localhost/127.0.0.1:37501 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T00:46:16,315 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T00:46:16,315 WARN [BP-1723240308-172.17.0.2-1733705171515 heartbeating to localhost/127.0.0.1:37501 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1723240308-172.17.0.2-1733705171515 (Datanode Uuid fcaf4034-57d0-4e71-ae2b-d23be958284f) service to localhost/127.0.0.1:37501
2024-12-09T00:46:16,315 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T00:46:16,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/data/data3/current/BP-1723240308-172.17.0.2-1733705171515 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T00:46:16,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/data/data4/current/BP-1723240308-172.17.0.2-1733705171515 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T00:46:16,316 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T00:46:16,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@484ab650{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T00:46:16,319 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d09d7d5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T00:46:16,319 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T00:46:16,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d63bfae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T00:46:16,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c411bc1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/hadoop.log.dir/,STOPPED}
2024-12-09T00:46:16,320 WARN [BP-1723240308-172.17.0.2-1733705171515 heartbeating to localhost/127.0.0.1:37501 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T00:46:16,320 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T00:46:16,320 WARN [BP-1723240308-172.17.0.2-1733705171515 heartbeating to localhost/127.0.0.1:37501 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1723240308-172.17.0.2-1733705171515 (Datanode Uuid 065a2436-516a-4302-aee5-870871bb86da) service to localhost/127.0.0.1:37501
2024-12-09T00:46:16,321 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T00:46:16,321 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/data/data1/current/BP-1723240308-172.17.0.2-1733705171515 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T00:46:16,321 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/cluster_b0de736a-8fdc-4c60-9559-c61ad70016c9/data/data2/current/BP-1723240308-172.17.0.2-1733705171515 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T00:46:16,321 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T00:46:16,326 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5609bdf8{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T00:46:16,326 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7cbf28d2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T00:46:16,327 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T00:46:16,327 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5574eaf2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T00:46:16,327 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c686d6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/396c7edd-89ac-c2ef-b5b3-e3f3c783bb3e/hadoop.log.dir/,STOPPED}
2024-12-09T00:46:16,333 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T00:46:16,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T00:46:16,363 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=151 (was 89) - Thread LEAK? -, OpenFileDescriptor=516 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=133 (was 136), ProcessCount=11 (was 11), AvailableMemoryMB=16069 (was 16226)
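Editor's note: the shutdown sequence above (NettyRpcServer stop, ZooKeeper close, Jetty context teardown, datanode block-pool shutdown, MiniZK shutdown, and the final ResourceChecker report) is what the HBase test harness emits when a mini cluster is torn down after a test. The sketch below is a minimal, hypothetical illustration of a test that would produce such a teardown, assuming the HBase 3.x test utilities (HBaseTestingUtil with startMiniCluster/shutdownMiniCluster) and JUnit 4; it is not the source of TestHBaseWalOnEC, and the class name and test body are invented for illustration.

// Hedged sketch (assumed API): start and stop an in-process HBase mini cluster
// so that teardown emits log output like the section above. Names and exact
// signatures are assumptions based on the HBase 3.x test utilities.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class ExampleMiniClusterTest {
  // Manages an in-process HDFS + ZooKeeper + HBase master/regionserver stack.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpCluster() throws Exception {
    // Starts the mini cluster (HDFS datanodes, MiniZK, master, region servers).
    UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownCluster() throws Exception {
    // Triggers the sequence logged above: RPC server stop, ZooKeeper close,
    // Jetty handlers stopped, datanode block pools ended, MiniZK shutdown.
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void testSomething() throws Exception {
    // A real test would exercise the cluster here (e.g. create a table,
    // write and read data) before the class-level teardown runs.
  }
}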