2024-12-08 05:51:09,708 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-08 05:51:09,725 main DEBUG Took 0.014007 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-08 05:51:09,725 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-08 05:51:09,726 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-08 05:51:09,727 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-08 05:51:09,729 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:51:09,741 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-08 05:51:09,759 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,761 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:51:09,761 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,762 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:51:09,762 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,762 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:51:09,763 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,764 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:51:09,764 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,764 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:51:09,765 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,765 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:51:09,766 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,766 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-08 05:51:09,767 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,767 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:51:09,767 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,768 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:51:09,768 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,768 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:51:09,769 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,769 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:51:09,769 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,770 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:51:09,770 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,770 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-08 05:51:09,772 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:51:09,773 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-08 05:51:09,775 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-08 05:51:09,775 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-08 05:51:09,776 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-08 05:51:09,776 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-08 05:51:09,785 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-08 05:51:09,788 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-08 05:51:09,789 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-08 05:51:09,789 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-08 05:51:09,790 main DEBUG createAppenders(={Console}) 2024-12-08 05:51:09,790 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-08 05:51:09,791 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-08 05:51:09,791 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-08 05:51:09,791 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-08 05:51:09,792 main DEBUG OutputStream closed 2024-12-08 05:51:09,792 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-08 05:51:09,792 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-08 05:51:09,792 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-08 05:51:09,861 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-08 05:51:09,863 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-08 05:51:09,864 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-08 05:51:09,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-08 05:51:09,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-08 05:51:09,867 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-08 05:51:09,867 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-08 05:51:09,868 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-08 05:51:09,868 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-08 05:51:09,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-08 05:51:09,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-08 05:51:09,869 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-08 05:51:09,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-08 05:51:09,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-08 05:51:09,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-08 05:51:09,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-08 05:51:09,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-08 05:51:09,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-08 05:51:09,874 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-08 05:51:09,875 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-08 05:51:09,875 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-08 05:51:09,876 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-08T05:51:09,892 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-08 05:51:09,895 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-08 05:51:09,895 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-08T05:51:10,184 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f 2024-12-08T05:51:10,208 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd, deleteOnExit=true 2024-12-08T05:51:10,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/test.cache.data in system properties and HBase conf 2024-12-08T05:51:10,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T05:51:10,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/hadoop.log.dir in system properties and HBase conf 2024-12-08T05:51:10,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T05:51:10,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T05:51:10,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T05:51:10,327 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-08T05:51:10,441 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T05:51:10,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:51:10,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:51:10,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T05:51:10,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:51:10,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T05:51:10,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T05:51:10,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:51:10,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:51:10,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T05:51:10,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/nfs.dump.dir in system properties and HBase conf 2024-12-08T05:51:10,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/java.io.tmpdir in system properties and HBase conf 2024-12-08T05:51:10,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:51:10,454 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T05:51:10,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T05:51:11,317 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-08T05:51:11,392 INFO [Time-limited test {}] log.Log(170): Logging initialized @2485ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-08T05:51:11,468 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:11,529 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:11,550 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:11,550 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:11,552 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:51:11,564 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:11,567 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:11,568 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:11,761 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/java.io.tmpdir/jetty-localhost-41907-hadoop-hdfs-3_4_1-tests_jar-_-any-5939673560881542574/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:51:11,768 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:41907} 2024-12-08T05:51:11,768 INFO [Time-limited test {}] server.Server(415): Started @2862ms 2024-12-08T05:51:12,143 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:12,150 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:12,152 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:12,153 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:12,153 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:51:12,154 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:12,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:12,278 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/java.io.tmpdir/jetty-localhost-44323-hadoop-hdfs-3_4_1-tests_jar-_-any-9119914126229227606/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:12,279 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:44323} 2024-12-08T05:51:12,279 INFO [Time-limited test {}] server.Server(415): Started @3373ms 2024-12-08T05:51:12,334 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:51:12,454 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:12,459 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:12,460 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:12,460 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:12,460 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:51:12,462 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:12,462 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:12,608 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/java.io.tmpdir/jetty-localhost-42335-hadoop-hdfs-3_4_1-tests_jar-_-any-7808512058002774477/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:12,609 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:42335} 2024-12-08T05:51:12,610 INFO [Time-limited test {}] server.Server(415): Started @3704ms 2024-12-08T05:51:12,612 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:51:12,656 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:12,660 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:12,661 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:12,662 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:12,662 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:51:12,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:12,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:12,785 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/java.io.tmpdir/jetty-localhost-40573-hadoop-hdfs-3_4_1-tests_jar-_-any-12747234109051123685/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:12,786 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:40573} 2024-12-08T05:51:12,786 INFO [Time-limited test {}] server.Server(415): Started @3880ms 2024-12-08T05:51:12,789 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-08T05:51:12,810 WARN [Thread-110 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/data/data4/current/BP-1565006184-172.17.0.2-1733637071052/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:12,810 WARN [Thread-112 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/data/data2/current/BP-1565006184-172.17.0.2-1733637071052/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:12,810 WARN [Thread-111 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/data/data1/current/BP-1565006184-172.17.0.2-1733637071052/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:12,810 WARN [Thread-109 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/data/data3/current/BP-1565006184-172.17.0.2-1733637071052/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:12,870 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:51:12,870 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:51:12,913 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/data/data5/current/BP-1565006184-172.17.0.2-1733637071052/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:12,913 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/data/data6/current/BP-1565006184-172.17.0.2-1733637071052/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:12,949 WARN [Thread-115 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:51:12,968 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3fde7ce156862549 with lease ID 0x84e70be19dfc3ffe: Processing first storage report for DS-7b85ce7e-e863-47c8-a5d3-9012a96a46a7 from datanode DatanodeRegistration(127.0.0.1:32973, datanodeUuid=430f2c8c-be3c-4662-9599-8614d22bb597, infoPort=32979, infoSecurePort=0, ipcPort=39117, storageInfo=lv=-57;cid=testClusterID;nsid=290765163;c=1733637071052) 2024-12-08T05:51:12,969 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3fde7ce156862549 with lease ID 0x84e70be19dfc3ffe: from storage DS-7b85ce7e-e863-47c8-a5d3-9012a96a46a7 node DatanodeRegistration(127.0.0.1:32973, datanodeUuid=430f2c8c-be3c-4662-9599-8614d22bb597, infoPort=32979, infoSecurePort=0, ipcPort=39117, storageInfo=lv=-57;cid=testClusterID;nsid=290765163;c=1733637071052), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T05:51:12,970 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa400d1044ebf9ea7 with lease ID 0x84e70be19dfc3fff: Processing first storage report for DS-ea85cf9d-32ae-4bbd-b481-f3075d5b11ad from datanode DatanodeRegistration(127.0.0.1:40739, datanodeUuid=1e205595-8b3c-4309-baa9-f9b51c68fce3, infoPort=38043, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=290765163;c=1733637071052) 2024-12-08T05:51:12,970 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa400d1044ebf9ea7 with lease ID 0x84e70be19dfc3fff: from storage DS-ea85cf9d-32ae-4bbd-b481-f3075d5b11ad node DatanodeRegistration(127.0.0.1:40739, datanodeUuid=1e205595-8b3c-4309-baa9-f9b51c68fce3, infoPort=38043, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=290765163;c=1733637071052), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:12,970 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x86cc29c04f90d6bf with lease ID 0x84e70be19dfc4000: Processing first storage report for DS-96f450d9-9119-4e24-9cd0-a78d03add4ab from datanode DatanodeRegistration(127.0.0.1:39331, datanodeUuid=4af3e1bd-b3db-4a80-ae2a-99cfff2bfba8, infoPort=41683, infoSecurePort=0, ipcPort=34399, storageInfo=lv=-57;cid=testClusterID;nsid=290765163;c=1733637071052) 2024-12-08T05:51:12,970 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x86cc29c04f90d6bf with lease ID 0x84e70be19dfc4000: from storage DS-96f450d9-9119-4e24-9cd0-a78d03add4ab node DatanodeRegistration(127.0.0.1:39331, datanodeUuid=4af3e1bd-b3db-4a80-ae2a-99cfff2bfba8, infoPort=41683, infoSecurePort=0, ipcPort=34399, storageInfo=lv=-57;cid=testClusterID;nsid=290765163;c=1733637071052), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:12,971 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3fde7ce156862549 with lease ID 0x84e70be19dfc3ffe: Processing first storage report for DS-9aaaa921-a9bd-410b-86f6-6f320b141c27 from datanode DatanodeRegistration(127.0.0.1:32973, datanodeUuid=430f2c8c-be3c-4662-9599-8614d22bb597, infoPort=32979, infoSecurePort=0, ipcPort=39117, storageInfo=lv=-57;cid=testClusterID;nsid=290765163;c=1733637071052) 2024-12-08T05:51:12,971 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x3fde7ce156862549 with lease ID 0x84e70be19dfc3ffe: from storage DS-9aaaa921-a9bd-410b-86f6-6f320b141c27 node DatanodeRegistration(127.0.0.1:32973, datanodeUuid=430f2c8c-be3c-4662-9599-8614d22bb597, infoPort=32979, infoSecurePort=0, ipcPort=39117, storageInfo=lv=-57;cid=testClusterID;nsid=290765163;c=1733637071052), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T05:51:12,971 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa400d1044ebf9ea7 with lease ID 0x84e70be19dfc3fff: Processing first storage report for DS-a17de67c-e21b-49a1-a105-559651279aca from datanode DatanodeRegistration(127.0.0.1:40739, datanodeUuid=1e205595-8b3c-4309-baa9-f9b51c68fce3, infoPort=38043, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=290765163;c=1733637071052) 2024-12-08T05:51:12,971 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa400d1044ebf9ea7 with lease ID 0x84e70be19dfc3fff: from storage DS-a17de67c-e21b-49a1-a105-559651279aca node DatanodeRegistration(127.0.0.1:40739, datanodeUuid=1e205595-8b3c-4309-baa9-f9b51c68fce3, infoPort=38043, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=290765163;c=1733637071052), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:12,972 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x86cc29c04f90d6bf with lease ID 0x84e70be19dfc4000: Processing first storage report for DS-94c7c975-5aee-451d-978a-370047fe0725 from datanode DatanodeRegistration(127.0.0.1:39331, datanodeUuid=4af3e1bd-b3db-4a80-ae2a-99cfff2bfba8, infoPort=41683, infoSecurePort=0, ipcPort=34399, storageInfo=lv=-57;cid=testClusterID;nsid=290765163;c=1733637071052) 2024-12-08T05:51:12,972 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x86cc29c04f90d6bf with lease ID 0x84e70be19dfc4000: from storage DS-94c7c975-5aee-451d-978a-370047fe0725 node DatanodeRegistration(127.0.0.1:39331, datanodeUuid=4af3e1bd-b3db-4a80-ae2a-99cfff2bfba8, infoPort=41683, infoSecurePort=0, ipcPort=34399, storageInfo=lv=-57;cid=testClusterID;nsid=290765163;c=1733637071052), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:13,176 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f 2024-12-08T05:51:13,251 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-08T05:51:13,306 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=159, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=170, ProcessCount=11, AvailableMemoryMB=8115 2024-12-08T05:51:13,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T05:51:13,316 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-08T05:51:13,406 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/zookeeper_0, clientPort=59278, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T05:51:13,417 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59278 2024-12-08T05:51:13,433 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:13,437 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:13,523 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:13,524 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:13,573 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_598324605_22 at /127.0.0.1:58306 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:40739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58306 dst: /127.0.0.1:40739 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:13,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-08T05:51:13,992 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T05:51:14,002 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1 with version=8 2024-12-08T05:51:14,002 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/hbase-staging 2024-12-08T05:51:14,114 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-08T05:51:14,361 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:51:14,372 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:14,373 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:14,377 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:51:14,378 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:14,378 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:51:14,516 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T05:51:14,576 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-08T05:51:14,585 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-08T05:51:14,589 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:51:14,617 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 8205 (auto-detected) 2024-12-08T05:51:14,618 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-08T05:51:14,637 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33841 2024-12-08T05:51:14,660 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33841 connecting to ZooKeeper ensemble=127.0.0.1:59278 2024-12-08T05:51:14,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:338410x0, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:51:14,697 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33841-0x10190a054770000 connected 2024-12-08T05:51:14,727 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:14,729 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:14,738 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:14,742 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1, hbase.cluster.distributed=false 2024-12-08T05:51:14,764 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:51:14,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33841 2024-12-08T05:51:14,770 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33841 2024-12-08T05:51:14,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33841 2024-12-08T05:51:14,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33841 2024-12-08T05:51:14,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33841 2024-12-08T05:51:14,885 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:51:14,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:14,887 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:14,887 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:51:14,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:14,888 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:51:14,890 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:51:14,892 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:51:14,893 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45547 2024-12-08T05:51:14,895 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45547 connecting to ZooKeeper ensemble=127.0.0.1:59278 2024-12-08T05:51:14,896 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:14,900 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:14,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455470x0, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:51:14,907 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45547-0x10190a054770001 connected 2024-12-08T05:51:14,908 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:14,912 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T05:51:14,920 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:51:14,923 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T05:51:14,928 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:51:14,929 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45547 2024-12-08T05:51:14,932 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=45547 2024-12-08T05:51:14,932 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45547 2024-12-08T05:51:14,933 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45547 2024-12-08T05:51:14,933 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45547 2024-12-08T05:51:14,951 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:51:14,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:14,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:14,952 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:51:14,952 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:14,952 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:51:14,952 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:51:14,953 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:51:14,954 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33005 2024-12-08T05:51:14,955 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33005 connecting to ZooKeeper ensemble=127.0.0.1:59278 2024-12-08T05:51:14,956 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:14,959 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:14,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:330050x0, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:51:14,968 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33005-0x10190a054770002 connected 2024-12-08T05:51:14,968 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:14,969 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-12-08T05:51:14,973 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:51:14,974 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T05:51:14,976 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:51:14,980 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33005 2024-12-08T05:51:14,981 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33005 2024-12-08T05:51:14,981 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33005 2024-12-08T05:51:14,983 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33005 2024-12-08T05:51:14,983 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33005 2024-12-08T05:51:15,002 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:51:15,002 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:15,002 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:15,002 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:51:15,002 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:15,003 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:51:15,003 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:51:15,003 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:51:15,004 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34051 2024-12-08T05:51:15,005 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34051 connecting to ZooKeeper ensemble=127.0.0.1:59278 2024-12-08T05:51:15,007 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:15,009 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:15,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:340510x0, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:51:15,017 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34051-0x10190a054770003 connected 2024-12-08T05:51:15,017 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:15,017 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T05:51:15,018 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:51:15,019 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T05:51:15,021 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:51:15,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34051 2024-12-08T05:51:15,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34051 2024-12-08T05:51:15,023 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34051 2024-12-08T05:51:15,024 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34051 2024-12-08T05:51:15,024 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34051 2024-12-08T05:51:15,039 DEBUG [M:0;0d942cb2025d:33841 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0d942cb2025d:33841 2024-12-08T05:51:15,040 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0d942cb2025d,33841,1733637074167 2024-12-08T05:51:15,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:15,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:15,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
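The entries above show each region server's RecoverableZooKeeper session connecting to the 127.0.0.1:59278 ensemble and ZKUtil setting watchers on znodes that do not yet exist (/hbase/running, /hbase/master, /hbase/acl). As an illustration only, the following is a minimal sketch of that watch-before-create pattern using the stock org.apache.zookeeper client rather than HBase's internal ZKUtil/RecoverableZooKeeper wrappers; the ensemble address and znode path are taken from the log, while the class name and timeout are assumptions.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZkWatchSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Session watcher: fires SyncConnected once the session is established,
        // matching the "state=SyncConnected, path=null" events in the log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59278", 30_000, (WatchedEvent e) -> {
            if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();

        // exists() with watch=true registers a watch even when the znode is absent,
        // which is what "Set watcher on znode that does not yet exist" refers to.
        Stat running = zk.exists("/hbase/running", true);
        System.out.println("/hbase/running present: " + (running != null));
    }
}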
2024-12-08T05:51:15,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:15,050 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0d942cb2025d,33841,1733637074167 2024-12-08T05:51:15,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T05:51:15,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T05:51:15,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T05:51:15,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,073 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T05:51:15,075 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0d942cb2025d,33841,1733637074167 from backup master directory 2024-12-08T05:51:15,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:15,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:15,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
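The ZKWatcher(609) entries are the other side of that mechanism: ZooKeeper delivering NodeCreated, NodeDeleted and NodeChildrenChanged notifications to each of the four sessions as the master claims /hbase/master and removes its backup-masters entry. A rough sketch of how such notifications arrive through the plain Watcher callback; HBase's ZKWatcher additionally re-registers watches and fans events out to registered listeners, which this sketch omits.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

// Illustrative watcher: prints the same three event types seen in the test output.
public class EventShapeSketch implements Watcher {
    @Override
    public void process(WatchedEvent event) {
        switch (event.getType()) {
            case NodeCreated:
                System.out.println("created: " + event.getPath());          // e.g. /hbase/master
                break;
            case NodeDeleted:
                System.out.println("deleted: " + event.getPath());          // e.g. a backup-masters child
                break;
            case NodeChildrenChanged:
                System.out.println("children changed: " + event.getPath()); // e.g. /hbase/backup-masters
                break;
            default:
                // Covers type=None (session state changes such as SyncConnected)
                // and NodeDataChanged, which this particular log does not show.
                System.out.println("other event: " + event.getType() + " state=" + event.getState());
        }
        // ZooKeeper watches are one-shot: a real implementation re-registers them here.
    }
}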
2024-12-08T05:51:15,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0d942cb2025d,33841,1733637074167 2024-12-08T05:51:15,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:15,079 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:51:15,079 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0d942cb2025d,33841,1733637074167 2024-12-08T05:51:15,081 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-08T05:51:15,083 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-08T05:51:15,145 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/hbase.id] with ID: f9012b3e-fd7d-4b47-8285-176401f122f8 2024-12-08T05:51:15,145 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/.tmp/hbase.id 2024-12-08T05:51:15,152 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:15,152 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:15,156 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_598324605_22 at /127.0.0.1:36686 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:39331:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36686 dst: /127.0.0.1:39331 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:15,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39331 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-08T05:51:15,162 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T05:51:15,162 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/.tmp/hbase.id]:[hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/hbase.id] 2024-12-08T05:51:15,205 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:15,210 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T05:51:15,228 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-12-08T05:51:15,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,245 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:15,245 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:15,248 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_598324605_22 at /127.0.0.1:58338 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:40739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58338 dst: /127.0.0.1:40739 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:15,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-08T05:51:15,254 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
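The recurring DFSStripedOutputStream warnings come from writing under the RS-3-2-1024k erasure coding policy (3 data + 2 parity = 5 target datanodes) on a mini cluster with fewer datanodes than that, so the parity blocks cannot be placed and each block group reports two failed blocks. A hedged sketch of how one might confirm the mismatch from the client side with Hadoop's DistributedFileSystem API; the directory path is hypothetical, and the policy being set on it is an assumption inferred from the log, not something the log states.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcCheckSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Namenode address as it appears in the log; adjust for a real cluster.
        conf.set("fs.defaultFS", "hdfs://localhost:43739");
        try (FileSystem fs = FileSystem.get(conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;

            // Hypothetical directory used only for illustration.
            Path dir = new Path("/user/jenkins/test-data");
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
            DatanodeInfo[] live = dfs.getDataNodeStats();

            if (policy != null) {
                // RS-3-2 needs 3 data + 2 parity block targets.
                int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
                System.out.printf("policy %s needs %d datanodes, cluster has %d live%n",
                    policy.getName(), needed, live.length);
            } else {
                System.out.println("directory is not erasure coded; live datanodes: " + live.length);
            }
            // A shortfall here corresponds to the "Cannot allocate parity block" warnings;
            // the log itself points at 'hdfs ec -verifyClusterSetup' for the CLI equivalent.
        }
    }
}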
2024-12-08T05:51:15,269 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:51:15,271 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T05:51:15,277 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T05:51:15,304 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:15,304 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:15,307 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_598324605_22 at /127.0.0.1:36716 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:39331:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36716 dst: /127.0.0.1:39331 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:15,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39331 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-08T05:51:15,313 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T05:51:15,331 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store 2024-12-08T05:51:15,348 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:15,348 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:15,352 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_598324605_22 at /127.0.0.1:58344 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58344 dst: /127.0.0.1:40739 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:15,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-08T05:51:15,357 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T05:51:15,362 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-08T05:51:15,365 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:15,366 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:51:15,366 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:15,366 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:15,368 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-08T05:51:15,368 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:15,368 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:15,370 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637075366Disabling compacts and flushes for region at 1733637075366Disabling writes for close at 1733637075368 (+2 ms)Writing region close event to WAL at 1733637075368Closed at 1733637075368 2024-12-08T05:51:15,372 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/.initializing 2024-12-08T05:51:15,372 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/WALs/0d942cb2025d,33841,1733637074167 2024-12-08T05:51:15,383 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T05:51:15,399 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C33841%2C1733637074167, suffix=, logDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/WALs/0d942cb2025d,33841,1733637074167, archiveDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/oldWALs, maxLogs=10 2024-12-08T05:51:15,431 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/WALs/0d942cb2025d,33841,1733637074167/0d942cb2025d%2C33841%2C1733637074167.1733637075405, exclude list is [], retry=0 2024-12-08T05:51:15,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:15,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32973,DS-7b85ce7e-e863-47c8-a5d3-9012a96a46a7,DISK] 2024-12-08T05:51:15,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39331,DS-96f450d9-9119-4e24-9cd0-a78d03add4ab,DISK] 2024-12-08T05:51:15,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40739,DS-ea85cf9d-32ae-4bbd-b481-f3075d5b11ad,DISK] 2024-12-08T05:51:15,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-08T05:51:15,498 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/WALs/0d942cb2025d,33841,1733637074167/0d942cb2025d%2C33841%2C1733637074167.1733637075405 2024-12-08T05:51:15,499 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41683:41683),(127.0.0.1/127.0.0.1:32979:32979),(127.0.0.1/127.0.0.1:38043:38043)] 2024-12-08T05:51:15,499 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:51:15,500 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:15,503 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:15,504 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:15,543 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:15,570 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T05:51:15,574 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:15,576 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:15,577 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:15,579 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T05:51:15,580 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:15,580 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:15,581 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:15,583 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T05:51:15,583 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:15,584 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:15,584 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:15,587 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T05:51:15,587 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:15,588 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:15,588 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:15,591 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:15,592 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:15,597 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:15,597 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:15,600 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T05:51:15,603 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:15,608 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:51:15,609 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72747832, jitterRate=0.08402717113494873}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T05:51:15,616 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733637075517Initializing all the Stores at 1733637075519 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637075520 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637075520Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637075521 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637075521Cleaning up temporary data from old regions at 1733637075597 (+76 ms)Region opened successfully at 1733637075615 (+18 ms) 2024-12-08T05:51:15,617 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T05:51:15,651 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@681e6d0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:51:15,684 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T05:51:15,695 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T05:51:15,695 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T05:51:15,698 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T05:51:15,699 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-08T05:51:15,704 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-08T05:51:15,704 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T05:51:15,730 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T05:51:15,738 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T05:51:15,740 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T05:51:15,742 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T05:51:15,743 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T05:51:15,745 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T05:51:15,747 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T05:51:15,751 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T05:51:15,752 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T05:51:15,753 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T05:51:15,756 DEBUG [master/0d942cb2025d:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T05:51:15,773 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T05:51:15,775 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T05:51:15,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:15,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:15,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:15,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:15,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,782 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0d942cb2025d,33841,1733637074167, sessionid=0x10190a054770000, setting cluster-up flag (Was=false) 2024-12-08T05:51:15,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
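Two of the messages in this stretch name the exact configuration keys behind them: SimpleRegionNormalizer reports 'hbase.normalizer.merge.min_region_size.mb', and the earlier StoreHotnessProtector notice points at 'hbase.region.store.parallel.put.limit'. A minimal sketch of setting those keys on an HBase Configuration before starting a cluster or test; the values chosen are arbitrary examples, not values the log prescribes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ConfSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Key reported by SimpleRegionNormalizer in the log; value is in MB.
        conf.setLong("hbase.normalizer.merge.min_region_size.mb", 1);

        // Key named in the StoreHotnessProtector notice; a value > 0 enables the protector.
        conf.setInt("hbase.region.store.parallel.put.limit", 10);

        System.out.println(conf.get("hbase.normalizer.merge.min_region_size.mb"));
    }
}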
2024-12-08T05:51:15,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,801 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T05:51:15,803 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,33841,1733637074167 2024-12-08T05:51:15,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:15,818 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T05:51:15,820 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,33841,1733637074167 2024-12-08T05:51:15,827 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T05:51:15,902 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:15,912 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T05:51:15,919 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
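The StochasticLoadBalancer line reports the knobs it loaded (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000, plus the list of cost functions). Changing those values for a test would look roughly like the sketch below; note that the property names are recalled from the balancer's configuration and are an assumption, since the log prints only the parsed values, not the keys.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Values mirror the "Loaded config" line above; the key names are assumed,
        // not confirmed by this log.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
    }
}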
2024-12-08T05:51:15,925 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0d942cb2025d,33841,1733637074167 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T05:51:15,928 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.HRegionServer(746): ClusterId : f9012b3e-fd7d-4b47-8285-176401f122f8 2024-12-08T05:51:15,928 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.HRegionServer(746): ClusterId : f9012b3e-fd7d-4b47-8285-176401f122f8 2024-12-08T05:51:15,928 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(746): ClusterId : f9012b3e-fd7d-4b47-8285-176401f122f8 2024-12-08T05:51:15,931 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:51:15,931 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:51:15,931 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:51:15,932 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:15,932 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:15,933 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:15,933 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:15,933 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0d942cb2025d:0, corePoolSize=10, maxPoolSize=10 2024-12-08T05:51:15,933 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:15,933 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:51:15,934 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:15,936 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733637105936 2024-12-08T05:51:15,937 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T05:51:15,937 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
flush-table-proc initialized 2024-12-08T05:51:15,937 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T05:51:15,937 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:51:15,937 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:51:15,937 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:51:15,938 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T05:51:15,939 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T05:51:15,940 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:15,941 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T05:51:15,941 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:51:15,941 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:51:15,941 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:51:15,942 DEBUG [RS:2;0d942cb2025d:34051 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ea701c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:51:15,942 DEBUG [RS:1;0d942cb2025d:33005 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58573f98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:51:15,942 DEBUG [RS:0;0d942cb2025d:45547 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dcbfee5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:51:15,943 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T05:51:15,944 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T05:51:15,944 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T05:51:15,944 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T05:51:15,949 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:15,949 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T05:51:15,949 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:15,954 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T05:51:15,955 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T05:51:15,956 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T05:51:15,958 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T05:51:15,959 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T05:51:15,961 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637075960,5,FailOnTimeoutGroup] 2024-12-08T05:51:15,962 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637075961,5,FailOnTimeoutGroup] 2024-12-08T05:51:15,962 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-08T05:51:15,962 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T05:51:15,962 DEBUG [RS:0;0d942cb2025d:45547 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0d942cb2025d:45547 2024-12-08T05:51:15,962 DEBUG [RS:1;0d942cb2025d:33005 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;0d942cb2025d:33005 2024-12-08T05:51:15,963 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:15,964 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:15,964 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:15,964 DEBUG [RS:2;0d942cb2025d:34051 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;0d942cb2025d:34051 2024-12-08T05:51:15,964 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:15,967 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:51:15,967 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:51:15,967 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:51:15,967 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:51:15,967 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:51:15,967 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:51:15,967 DEBUG [RS:1;0d942cb2025d:33005 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T05:51:15,967 DEBUG [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T05:51:15,967 DEBUG [RS:0;0d942cb2025d:45547 {}] regionserver.HRegionServer(832): About to register with Master. 
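The HMaster line above notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A minimal sketch of enabling it, using the property name quoted in the log; the threshold of 256 is only an illustrative choice:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountRecoverySketch {
        public static Configuration enable() {
            Configuration conf = HBaseConfiguration.create();
            // Any value > 0 turns the recovery chore on; 256 is just an example threshold.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
            return conf;
        }
    }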
2024-12-08T05:51:15,970 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,33841,1733637074167 with port=34051, startcode=1733637075001 2024-12-08T05:51:15,970 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,33841,1733637074167 with port=45547, startcode=1733637074845 2024-12-08T05:51:15,970 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,33841,1733637074167 with port=33005, startcode=1733637074950 2024-12-08T05:51:15,974 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_598324605_22 at /127.0.0.1:58362 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:40739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58362 dst: /127.0.0.1:40739 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:15,984 DEBUG [RS:2;0d942cb2025d:34051 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:51:15,984 DEBUG [RS:0;0d942cb2025d:45547 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:51:15,984 DEBUG [RS:1;0d942cb2025d:33005 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:51:15,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-08T05:51:15,987 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T05:51:15,988 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T05:51:15,989 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1 2024-12-08T05:51:15,997 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:15,997 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
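The descriptor printed above for hbase:meta (family 'info' with VERSIONS => '3', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', IN_MEMORY => 'true', BLOCKSIZE => 8192) maps directly onto the client builder API. A sketch of a user table declaring one family with the same attributes, assuming the current ColumnFamilyDescriptorBuilder/TableDescriptorBuilder classes; "example_table" is a placeholder name, not something from this run:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamilySketch {
        public static TableDescriptor build() {
            // Attributes copied from the 'info' family in the descriptor logged above.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example_table")) // placeholder table name
                .setColumnFamily(info)
                .build();
        }
    }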
2024-12-08T05:51:15,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-08T05:51:15,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39331 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-08T05:51:16,005 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_598324605_22 at /127.0.0.1:58384 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:40739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58384 dst: /127.0.0.1:40739 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:16,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-08T05:51:16,019 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T05:51:16,021 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:16,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:51:16,026 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52659, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:51:16,026 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43095, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:51:16,027 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:51:16,027 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:16,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:16,029 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:51:16,031 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:51:16,031 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
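The CompactionConfiguration lines above (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0) are read from configuration. A hedged sketch of the knobs involved, assuming the usual hbase.hstore.compaction.* property names; the values simply mirror what the log reports:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static Configuration tune() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // 128 MB
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            return conf;
        }
    }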
2024-12-08T05:51:16,032 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:16,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:51:16,033 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33841 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,34051,1733637075001 2024-12-08T05:51:16,033 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52161, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:51:16,035 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:51:16,035 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:16,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:16,036 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33841 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,34051,1733637075001 2024-12-08T05:51:16,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:51:16,039 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:51:16,039 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:16,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:16,040 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:51:16,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740 2024-12-08T05:51:16,043 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740 2024-12-08T05:51:16,046 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:51:16,046 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:51:16,047 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T05:51:16,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:51:16,053 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33841 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,33005,1733637074950 2024-12-08T05:51:16,053 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33841 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,33005,1733637074950 2024-12-08T05:51:16,057 DEBUG [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1 2024-12-08T05:51:16,057 DEBUG [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43739 2024-12-08T05:51:16,057 DEBUG [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T05:51:16,059 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33841 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,45547,1733637074845 2024-12-08T05:51:16,059 DEBUG [RS:1;0d942cb2025d:33005 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1 2024-12-08T05:51:16,059 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33841 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,45547,1733637074845 2024-12-08T05:51:16,059 DEBUG [RS:1;0d942cb2025d:33005 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43739 2024-12-08T05:51:16,059 DEBUG [RS:1;0d942cb2025d:33005 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T05:51:16,062 DEBUG [RS:0;0d942cb2025d:45547 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1 
2024-12-08T05:51:16,063 DEBUG [RS:0;0d942cb2025d:45547 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43739 2024-12-08T05:51:16,063 DEBUG [RS:0;0d942cb2025d:45547 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T05:51:16,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:51:16,068 DEBUG [RS:1;0d942cb2025d:33005 {}] zookeeper.ZKUtil(111): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,33005,1733637074950 2024-12-08T05:51:16,069 DEBUG [RS:2;0d942cb2025d:34051 {}] zookeeper.ZKUtil(111): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,34051,1733637075001 2024-12-08T05:51:16,069 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:51:16,069 WARN [RS:2;0d942cb2025d:34051 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:51:16,069 WARN [RS:1;0d942cb2025d:33005 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:51:16,069 INFO [RS:1;0d942cb2025d:33005 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T05:51:16,069 INFO [RS:2;0d942cb2025d:34051 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T05:51:16,069 DEBUG [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,34051,1733637075001 2024-12-08T05:51:16,069 DEBUG [RS:1;0d942cb2025d:33005 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,33005,1733637074950 2024-12-08T05:51:16,070 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58857649, jitterRate=-0.12295268476009369}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T05:51:16,070 DEBUG [RS:0;0d942cb2025d:45547 {}] zookeeper.ZKUtil(111): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,45547,1733637074845 2024-12-08T05:51:16,071 WARN [RS:0;0d942cb2025d:45547 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
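All three region servers above instantiate AsyncFSWALProvider through WALFactory. A short sketch of how the WAL provider is normally selected, assuming hbase.wal.provider is the governing key in this build; "asyncfs" and "filesystem" are the commonly documented values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderSketch {
        public static Configuration chooseAsyncFs() {
            Configuration conf = HBaseConfiguration.create();
            // "asyncfs" corresponds to the AsyncFSWALProvider seen in the log;
            // "filesystem" would select the classic FSHLog-based provider instead.
            conf.set("hbase.wal.provider", "asyncfs");
            return conf;
        }
    }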
2024-12-08T05:51:16,071 INFO [RS:0;0d942cb2025d:45547 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T05:51:16,071 DEBUG [RS:0;0d942cb2025d:45547 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,45547,1733637074845 2024-12-08T05:51:16,073 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733637076021Initializing all the Stores at 1733637076023 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637076023Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637076024 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637076024Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637076024Cleaning up temporary data from old regions at 1733637076046 (+22 ms)Region opened successfully at 1733637076073 (+27 ms) 2024-12-08T05:51:16,073 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:51:16,073 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:51:16,073 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:51:16,073 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:51:16,074 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:51:16,076 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,34051,1733637075001] 2024-12-08T05:51:16,076 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,33005,1733637074950] 2024-12-08T05:51:16,076 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,45547,1733637074845] 2024-12-08T05:51:16,085 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:51:16,085 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637076073Disabling compacts and flushes for region at 1733637076073Disabling writes for close at 1733637076073Writing region close event to WAL at 1733637076084 (+11 ms)Closed at 1733637076085 (+1 ms) 2024-12-08T05:51:16,088 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:16,088 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T05:51:16,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T05:51:16,100 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:51:16,100 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:51:16,100 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:51:16,105 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:51:16,109 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T05:51:16,115 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:51:16,116 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:51:16,118 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:51:16,120 INFO [RS:1;0d942cb2025d:33005 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:51:16,120 INFO [RS:1;0d942cb2025d:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-08T05:51:16,120 INFO [RS:2;0d942cb2025d:34051 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:51:16,120 INFO [RS:0;0d942cb2025d:45547 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:51:16,121 INFO [RS:0;0d942cb2025d:45547 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,121 INFO [RS:2;0d942cb2025d:34051 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,121 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:51:16,124 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:51:16,124 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:51:16,128 INFO [RS:1;0d942cb2025d:33005 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:51:16,128 INFO [RS:2;0d942cb2025d:34051 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:51:16,128 INFO [RS:0;0d942cb2025d:45547 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:51:16,130 INFO [RS:1;0d942cb2025d:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,130 INFO [RS:0;0d942cb2025d:45547 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,130 INFO [RS:2;0d942cb2025d:34051 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
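The flusher lines above report globalMemStoreLimit=880 M with a low-water mark of 836 M (95% of the limit), and the compaction throughput controller is bounded at 50-100 MB/s. A hedged sketch of the properties that typically drive those numbers; the heap fractions shown are the usual defaults, not values read back from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndThroughputSketch {
        public static Configuration tune() {
            Configuration conf = HBaseConfiguration.create();
            // Fraction of heap usable by all memstores, and the lower-limit fraction
            // at which forced flushing backs off (0.4 / 0.95 are common defaults).
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            // Compaction throughput bounds matching the 100 MB/s and 50 MB/s in the log.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            return conf;
        }
    }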
2024-12-08T05:51:16,130 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,130 DEBUG [RS:2;0d942cb2025d:34051 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,130 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,130 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,130 DEBUG [RS:2;0d942cb2025d:34051 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,130 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,130 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,130 DEBUG [RS:2;0d942cb2025d:34051 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,130 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,130 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:2;0d942cb2025d:34051 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:51:16,131 DEBUG [RS:2;0d942cb2025d:34051 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:2;0d942cb2025d:34051 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 
2024-12-08T05:51:16,131 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:51:16,131 DEBUG [RS:2;0d942cb2025d:34051 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:2;0d942cb2025d:34051 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:2;0d942cb2025d:34051 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,131 DEBUG [RS:2;0d942cb2025d:34051 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,132 DEBUG [RS:2;0d942cb2025d:34051 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,132 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,132 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,132 DEBUG [RS:2;0d942cb2025d:34051 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,132 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,132 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,132 DEBUG [RS:2;0d942cb2025d:34051 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:16,132 DEBUG [RS:2;0d942cb2025d:34051 {}] 
executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:16,132 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:16,132 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,132 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:16,132 DEBUG [RS:1;0d942cb2025d:33005 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:16,132 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:16,133 DEBUG [RS:0;0d942cb2025d:45547 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:16,136 INFO [RS:2;0d942cb2025d:34051 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,137 INFO [RS:2;0d942cb2025d:34051 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,137 INFO [RS:2;0d942cb2025d:34051 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,137 INFO [RS:2;0d942cb2025d:34051 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,137 INFO [RS:1;0d942cb2025d:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,137 INFO [RS:2;0d942cb2025d:34051 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,137 INFO [RS:1;0d942cb2025d:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,137 INFO [RS:1;0d942cb2025d:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,137 INFO [RS:2;0d942cb2025d:34051 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,34051,1733637075001-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:51:16,137 INFO [RS:1;0d942cb2025d:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,137 INFO [RS:1;0d942cb2025d:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,137 INFO [RS:1;0d942cb2025d:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,33005,1733637074950-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-08T05:51:16,138 INFO [RS:0;0d942cb2025d:45547 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,138 INFO [RS:0;0d942cb2025d:45547 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,138 INFO [RS:0;0d942cb2025d:45547 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,138 INFO [RS:0;0d942cb2025d:45547 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,138 INFO [RS:0;0d942cb2025d:45547 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,138 INFO [RS:0;0d942cb2025d:45547 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,45547,1733637074845-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:51:16,162 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:51:16,164 INFO [RS:0;0d942cb2025d:45547 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,45547,1733637074845-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,164 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:51:16,164 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:51:16,164 INFO [RS:0;0d942cb2025d:45547 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,164 INFO [RS:2;0d942cb2025d:34051 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,34051,1733637075001-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,164 INFO [RS:1;0d942cb2025d:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,33005,1733637074950-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,164 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.Replication(171): 0d942cb2025d,45547,1733637074845 started 2024-12-08T05:51:16,165 INFO [RS:1;0d942cb2025d:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,165 INFO [RS:2;0d942cb2025d:34051 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,165 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.Replication(171): 0d942cb2025d,33005,1733637074950 started 2024-12-08T05:51:16,165 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.Replication(171): 0d942cb2025d,34051,1733637075001 started 2024-12-08T05:51:16,186 INFO [RS:0;0d942cb2025d:45547 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T05:51:16,186 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,45547,1733637074845, RpcServer on 0d942cb2025d/172.17.0.2:45547, sessionid=0x10190a054770001 2024-12-08T05:51:16,187 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:51:16,187 DEBUG [RS:0;0d942cb2025d:45547 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,45547,1733637074845 2024-12-08T05:51:16,187 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,45547,1733637074845' 2024-12-08T05:51:16,188 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:51:16,189 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:51:16,189 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:51:16,189 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:51:16,190 DEBUG [RS:0;0d942cb2025d:45547 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d942cb2025d,45547,1733637074845 2024-12-08T05:51:16,190 INFO [RS:1;0d942cb2025d:33005 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,190 INFO [RS:2;0d942cb2025d:34051 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T05:51:16,190 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,45547,1733637074845' 2024-12-08T05:51:16,190 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T05:51:16,190 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,34051,1733637075001, RpcServer on 0d942cb2025d/172.17.0.2:34051, sessionid=0x10190a054770003 2024-12-08T05:51:16,190 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,33005,1733637074950, RpcServer on 0d942cb2025d/172.17.0.2:33005, sessionid=0x10190a054770002 2024-12-08T05:51:16,190 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:51:16,190 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:51:16,190 DEBUG [RS:2;0d942cb2025d:34051 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,34051,1733637075001 2024-12-08T05:51:16,190 DEBUG [RS:1;0d942cb2025d:33005 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,33005,1733637074950 2024-12-08T05:51:16,190 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,34051,1733637075001' 2024-12-08T05:51:16,190 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,33005,1733637074950' 2024-12-08T05:51:16,190 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:51:16,190 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:51:16,190 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T05:51:16,191 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:51:16,191 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:51:16,191 DEBUG [RS:0;0d942cb2025d:45547 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T05:51:16,191 INFO [RS:0;0d942cb2025d:45547 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T05:51:16,191 INFO [RS:0;0d942cb2025d:45547 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-08T05:51:16,192 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:51:16,192 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:51:16,192 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:51:16,192 DEBUG [RS:2;0d942cb2025d:34051 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d942cb2025d,34051,1733637075001 2024-12-08T05:51:16,192 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:51:16,192 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,34051,1733637075001' 2024-12-08T05:51:16,192 DEBUG [RS:1;0d942cb2025d:33005 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d942cb2025d,33005,1733637074950 2024-12-08T05:51:16,192 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T05:51:16,192 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,33005,1733637074950' 2024-12-08T05:51:16,192 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T05:51:16,193 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T05:51:16,193 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T05:51:16,193 DEBUG [RS:2;0d942cb2025d:34051 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T05:51:16,193 DEBUG [RS:1;0d942cb2025d:33005 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T05:51:16,193 INFO [RS:2;0d942cb2025d:34051 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T05:51:16,193 INFO [RS:1;0d942cb2025d:33005 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T05:51:16,193 INFO [RS:2;0d942cb2025d:34051 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T05:51:16,193 INFO [RS:1;0d942cb2025d:33005 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T05:51:16,260 WARN [0d942cb2025d:33841 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
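The three region servers above each start a ZooKeeper-backed procedure member and then watch two znode trees per procedure type: '<proc>/abort' for aborted procedures and '<proc>/acquired' for newly offered ones. A minimal sketch of peeking at those znodes with the plain ZooKeeper client; the quorum address is the one this log's mini cluster uses and is only for illustration:

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureZNodeCheck {
  public static void main(String[] args) throws Exception {
    // Quorum taken from this log's mini cluster; a real deployment has its own quorum string.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59278", 30_000, event -> { });
    try {
      for (String base : new String[] { "/hbase/flush-table-proc", "/hbase/online-snapshot" }) {
        // Members watch <base>/acquired for new procedures and <base>/abort for aborted ones.
        List<String> acquired = zk.getChildren(base + "/acquired", false);
        List<String> aborted = zk.getChildren(base + "/abort", false);
        System.out.println(base + ": acquired=" + acquired + ", abort=" + aborted);
      }
    } finally {
      zk.close();
    }
  }
}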
2024-12-08T05:51:16,297 INFO [RS:1;0d942cb2025d:33005 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T05:51:16,297 INFO [RS:0;0d942cb2025d:45547 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T05:51:16,297 INFO [RS:2;0d942cb2025d:34051 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T05:51:16,301 INFO [RS:1;0d942cb2025d:33005 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C33005%2C1733637074950, suffix=, logDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,33005,1733637074950, archiveDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/oldWALs, maxLogs=32 2024-12-08T05:51:16,301 INFO [RS:0;0d942cb2025d:45547 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C45547%2C1733637074845, suffix=, logDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,45547,1733637074845, archiveDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/oldWALs, maxLogs=32 2024-12-08T05:51:16,301 INFO [RS:2;0d942cb2025d:34051 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C34051%2C1733637075001, suffix=, logDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,34051,1733637075001, archiveDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/oldWALs, maxLogs=32 2024-12-08T05:51:16,322 DEBUG [RS:1;0d942cb2025d:33005 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,33005,1733637074950/0d942cb2025d%2C33005%2C1733637074950.1733637076307, exclude list is [], retry=0 2024-12-08T05:51:16,322 DEBUG [RS:0;0d942cb2025d:45547 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,45547,1733637074845/0d942cb2025d%2C45547%2C1733637074845.1733637076307, exclude list is [], retry=0 2024-12-08T05:51:16,323 DEBUG [RS:2;0d942cb2025d:34051 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,34051,1733637075001/0d942cb2025d%2C34051%2C1733637075001.1733637076307, exclude list is [], retry=0 2024-12-08T05:51:16,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39331,DS-96f450d9-9119-4e24-9cd0-a78d03add4ab,DISK] 2024-12-08T05:51:16,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32973,DS-7b85ce7e-e863-47c8-a5d3-9012a96a46a7,DISK] 2024-12-08T05:51:16,328 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:40739,DS-ea85cf9d-32ae-4bbd-b481-f3075d5b11ad,DISK] 2024-12-08T05:51:16,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40739,DS-ea85cf9d-32ae-4bbd-b481-f3075d5b11ad,DISK] 2024-12-08T05:51:16,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39331,DS-96f450d9-9119-4e24-9cd0-a78d03add4ab,DISK] 2024-12-08T05:51:16,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32973,DS-7b85ce7e-e863-47c8-a5d3-9012a96a46a7,DISK] 2024-12-08T05:51:16,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32973,DS-7b85ce7e-e863-47c8-a5d3-9012a96a46a7,DISK] 2024-12-08T05:51:16,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40739,DS-ea85cf9d-32ae-4bbd-b481-f3075d5b11ad,DISK] 2024-12-08T05:51:16,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39331,DS-96f450d9-9119-4e24-9cd0-a78d03add4ab,DISK] 2024-12-08T05:51:16,359 INFO [RS:1;0d942cb2025d:33005 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,33005,1733637074950/0d942cb2025d%2C33005%2C1733637074950.1733637076307 2024-12-08T05:51:16,359 INFO [RS:2;0d942cb2025d:34051 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,34051,1733637075001/0d942cb2025d%2C34051%2C1733637075001.1733637076307 2024-12-08T05:51:16,360 INFO [RS:0;0d942cb2025d:45547 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,45547,1733637074845/0d942cb2025d%2C45547%2C1733637074845.1733637076307 2024-12-08T05:51:16,360 DEBUG [RS:2;0d942cb2025d:34051 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38043:38043),(127.0.0.1/127.0.0.1:32979:32979),(127.0.0.1/127.0.0.1:41683:41683)] 2024-12-08T05:51:16,361 DEBUG [RS:1;0d942cb2025d:33005 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38043:38043),(127.0.0.1/127.0.0.1:32979:32979),(127.0.0.1/127.0.0.1:41683:41683)] 2024-12-08T05:51:16,361 DEBUG [RS:0;0d942cb2025d:45547 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41683:41683),(127.0.0.1/127.0.0.1:32979:32979),(127.0.0.1/127.0.0.1:38043:38043)] 2024-12-08T05:51:16,513 DEBUG [0d942cb2025d:33841 {}] assignment.AssignmentManager(2472): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-12-08T05:51:16,521 DEBUG [0d942cb2025d:33841 {}] balancer.BalancerClusterState(204): Hosts are {0d942cb2025d=0} racks are {/default-rack=0} 2024-12-08T05:51:16,528 DEBUG [0d942cb2025d:33841 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-08T05:51:16,528 DEBUG [0d942cb2025d:33841 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-08T05:51:16,528 DEBUG [0d942cb2025d:33841 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-08T05:51:16,528 DEBUG [0d942cb2025d:33841 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-08T05:51:16,528 DEBUG [0d942cb2025d:33841 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-08T05:51:16,528 DEBUG [0d942cb2025d:33841 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-08T05:51:16,528 INFO [0d942cb2025d:33841 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-08T05:51:16,528 INFO [0d942cb2025d:33841 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-08T05:51:16,528 INFO [0d942cb2025d:33841 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-08T05:51:16,528 DEBUG [0d942cb2025d:33841 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-08T05:51:16,536 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0d942cb2025d,34051,1733637075001 2024-12-08T05:51:16,543 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,34051,1733637075001, state=OPENING 2024-12-08T05:51:16,548 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T05:51:16,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:16,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:16,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:16,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:16,550 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:16,551 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:16,551 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:16,551 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:16,553 DEBUG 
[PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:51:16,555 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,34051,1733637075001}] 2024-12-08T05:51:16,731 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T05:51:16,733 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48213, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T05:51:16,746 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T05:51:16,747 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T05:51:16,747 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-08T05:51:16,750 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C34051%2C1733637075001.meta, suffix=.meta, logDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,34051,1733637075001, archiveDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/oldWALs, maxLogs=32 2024-12-08T05:51:16,766 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,34051,1733637075001/0d942cb2025d%2C34051%2C1733637075001.meta.1733637076752.meta, exclude list is [], retry=0 2024-12-08T05:51:16,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40739,DS-ea85cf9d-32ae-4bbd-b481-f3075d5b11ad,DISK] 2024-12-08T05:51:16,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32973,DS-7b85ce7e-e863-47c8-a5d3-9012a96a46a7,DISK] 2024-12-08T05:51:16,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39331,DS-96f450d9-9119-4e24-9cd0-a78d03add4ab,DISK] 2024-12-08T05:51:16,774 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/WALs/0d942cb2025d,34051,1733637075001/0d942cb2025d%2C34051%2C1733637075001.meta.1733637076752.meta 2024-12-08T05:51:16,775 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38043:38043),(127.0.0.1/127.0.0.1:32979:32979),(127.0.0.1/127.0.0.1:41683:41683)] 2024-12-08T05:51:16,775 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:51:16,777 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T05:51:16,781 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T05:51:16,787 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-08T05:51:16,793 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T05:51:16,793 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:16,794 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T05:51:16,794 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T05:51:16,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:51:16,799 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:51:16,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:16,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:16,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:51:16,801 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:51:16,801 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:16,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:16,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:51:16,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:51:16,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:16,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:16,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:51:16,806 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:51:16,806 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:16,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:16,807 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:51:16,808 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740 2024-12-08T05:51:16,811 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740 2024-12-08T05:51:16,813 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:51:16,813 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:51:16,814 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
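The FlushLargeStoresPolicy line just above documents its fallback: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor, the lower bound becomes the region memstore flush size divided by the number of column families. A tiny sketch of that arithmetic, assuming the stock 128 MB region memstore flush size and the four hbase:meta families listed above (info, ns, rep_barrier, table), reproduces the logged 32.0 M:

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long memstoreFlushSize = 128L * 1024 * 1024; // assumed default hbase.hregion.memstore.flush.size
    int columnFamilies = 4;                      // hbase:meta: info, ns, rep_barrier, table
    long lowerBound = memstoreFlushSize / columnFamilies;
    System.out.println((lowerBound / (1024 * 1024)) + " MB"); // prints 32 MB, matching "(32.0 M)" in the log
  }
}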
2024-12-08T05:51:16,817 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:51:16,819 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62001298, jitterRate=-0.0761086642742157}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T05:51:16,819 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T05:51:16,820 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733637076794Writing region info on filesystem at 1733637076795 (+1 ms)Initializing all the Stores at 1733637076797 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637076797Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637076797Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637076797Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637076797Cleaning up temporary data from old regions at 1733637076813 (+16 ms)Running coprocessor post-open hooks at 1733637076819 (+6 ms)Region opened successfully at 1733637076820 (+1 ms) 2024-12-08T05:51:16,828 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733637076722 2024-12-08T05:51:16,839 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T05:51:16,839 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T05:51:16,841 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=0d942cb2025d,34051,1733637075001 2024-12-08T05:51:16,843 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,34051,1733637075001, state=OPEN 2024-12-08T05:51:16,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:51:16,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:51:16,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:51:16,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:51:16,868 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:16,868 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:16,868 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:16,869 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0d942cb2025d,34051,1733637075001 2024-12-08T05:51:16,876 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:16,876 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T05:51:16,876 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,34051,1733637075001 in 315 msec 2024-12-08T05:51:16,883 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T05:51:16,883 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 783 msec 2024-12-08T05:51:16,884 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:16,885 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T05:51:16,914 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:51:16,916 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=0d942cb2025d,34051,1733637075001, seqNum=-1] 2024-12-08T05:51:16,936 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:51:16,939 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53701, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:51:16,957 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1000 sec 2024-12-08T05:51:16,957 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733637076957, completionTime=-1 2024-12-08T05:51:16,960 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-08T05:51:16,960 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T05:51:16,985 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-08T05:51:16,985 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733637136985 2024-12-08T05:51:16,985 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733637196985 2024-12-08T05:51:16,985 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 25 msec 2024-12-08T05:51:16,987 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-08T05:51:16,993 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,33841,1733637074167-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,993 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,33841,1733637074167-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,993 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,33841,1733637074167-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,995 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0d942cb2025d:33841, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,995 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:16,996 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-08T05:51:17,002 DEBUG [master/0d942cb2025d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T05:51:17,024 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.944sec 2024-12-08T05:51:17,026 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T05:51:17,027 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T05:51:17,028 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T05:51:17,029 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T05:51:17,029 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T05:51:17,030 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,33841,1733637074167-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:51:17,030 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,33841,1733637074167-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T05:51:17,034 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T05:51:17,035 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T05:51:17,035 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,33841,1733637074167-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T05:51:17,039 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@265d0449, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:51:17,044 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-08T05:51:17,044 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-08T05:51:17,047 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0d942cb2025d,33841,-1 for getting cluster id 2024-12-08T05:51:17,050 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T05:51:17,058 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f9012b3e-fd7d-4b47-8285-176401f122f8' 2024-12-08T05:51:17,060 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T05:51:17,061 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f9012b3e-fd7d-4b47-8285-176401f122f8" 2024-12-08T05:51:17,063 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31b92bdd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:51:17,063 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0d942cb2025d,33841,-1] 2024-12-08T05:51:17,065 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T05:51:17,067 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:17,068 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57060, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T05:51:17,071 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367610db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:51:17,072 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:51:17,079 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,34051,1733637075001, seqNum=-1] 2024-12-08T05:51:17,079 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:51:17,082 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50918, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:51:17,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=0d942cb2025d,33841,1733637074167 2024-12-08T05:51:17,105 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T05:51:17,110 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 0d942cb2025d,33841,1733637074167 2024-12-08T05:51:17,112 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1f2ce21c 2024-12-08T05:51:17,113 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T05:51:17,115 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57074, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T05:51:17,120 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33841 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:51:17,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33841 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-08T05:51:17,129 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T05:51:17,131 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33841 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-08T05:51:17,132 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:17,134 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T05:51:17,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33841 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:51:17,142 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:17,142 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
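The HMaster line above records the client's create-table request for 'TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single 'cf' family keeping one version. A minimal sketch of issuing the same request through the HBase Admin API, assuming an already-open Connection (an illustration, not the test's own code):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  static void createTestTable(Connection connection) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
        .setRegionReplication(1)                        // REGION_REPLICATION => '1'
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                          // VERSIONS => '1'
            .build())
        .build();
    try (Admin admin = connection.getAdmin()) {
      admin.createTable(desc);                          // the master stores a CreateTableProcedure (pid=4 above)
    }
  }
}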
2024-12-08T05:51:17,145 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_598324605_22 at /127.0.0.1:47852 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:32973:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47852 dst: /127.0.0.1:32973
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T05:51:17,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-08T05:51:17,152 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T05:51:17,154 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3ea3128e85b661a549ecfc427d57b88a, NAME => 'TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1 2024-12-08T05:51:17,160 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:17,160 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-08T05:51:17,166 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_598324605_22 at /127.0.0.1:58440 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:40739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58440 dst: /127.0.0.1:40739
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T05:51:17,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-08T05:51:17,175 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T05:51:17,176 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:17,176 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 3ea3128e85b661a549ecfc427d57b88a, disabling compactions & flushes 2024-12-08T05:51:17,176 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 2024-12-08T05:51:17,176 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 2024-12-08T05:51:17,176 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. after waiting 0 ms 2024-12-08T05:51:17,176 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 2024-12-08T05:51:17,176 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 
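The DFSStripedOutputStream warnings above are what RS-3-2-1024k produces on this 3-datanode mini cluster: the policy needs 3 data plus 2 parity blocks on distinct datanodes, so the two parity allocations (index=3 and index=4) fail, and the log itself points at 'hdfs ec -verifyClusterSetup'. A minimal sketch of the equivalent check through the HDFS client API, assuming fs.defaultFS points at the cluster; the directory below is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcCapacityCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dir = new Path("/user/jenkins/test-data");    // illustrative; use the EC-enabled directory
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      if (policy == null) {
        System.out.println(dir + " has no erasure coding policy");
        return;
      }
      int needed = policy.getNumDataUnits() + policy.getNumParityUnits(); // 3 + 2 for RS-3-2-1024k
      int live = dfs.getDataNodeStats().length;                           // live datanodes
      System.out.printf("%s needs %d datanodes, cluster reports %d%n", policy.getName(), needed, live);
    }
  }
}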
2024-12-08T05:51:17,176 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3ea3128e85b661a549ecfc427d57b88a: Waiting for close lock at 1733637077176Disabling compacts and flushes for region at 1733637077176Disabling writes for close at 1733637077176Writing region close event to WAL at 1733637077176Closed at 1733637077176 2024-12-08T05:51:17,179 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T05:51:17,186 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733637077179"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733637077179"}]},"ts":"1733637077179"} 2024-12-08T05:51:17,191 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-08T05:51:17,193 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T05:51:17,196 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733637077194"}]},"ts":"1733637077194"} 2024-12-08T05:51:17,201 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-08T05:51:17,201 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {0d942cb2025d=0} racks are {/default-rack=0} 2024-12-08T05:51:17,203 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-08T05:51:17,203 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-08T05:51:17,203 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-08T05:51:17,203 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-08T05:51:17,203 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-08T05:51:17,203 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-08T05:51:17,203 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-08T05:51:17,203 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-08T05:51:17,203 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-08T05:51:17,203 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-08T05:51:17,204 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3ea3128e85b661a549ecfc427d57b88a, ASSIGN}] 2024-12-08T05:51:17,206 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3ea3128e85b661a549ecfc427d57b88a, ASSIGN 2024-12-08T05:51:17,209 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3ea3128e85b661a549ecfc427d57b88a, ASSIGN; state=OFFLINE, location=0d942cb2025d,34051,1733637075001; forceNewPlan=false, retain=false 2024-12-08T05:51:17,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33841 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:51:17,361 INFO [0d942cb2025d:33841 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-08T05:51:17,362 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3ea3128e85b661a549ecfc427d57b88a, regionState=OPENING, regionLocation=0d942cb2025d,34051,1733637075001 2024-12-08T05:51:17,366 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3ea3128e85b661a549ecfc427d57b88a, ASSIGN because future has completed 2024-12-08T05:51:17,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3ea3128e85b661a549ecfc427d57b88a, server=0d942cb2025d,34051,1733637075001}] 2024-12-08T05:51:17,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33841 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:51:17,527 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 
2024-12-08T05:51:17,527 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3ea3128e85b661a549ecfc427d57b88a, NAME => 'TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:51:17,528 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 3ea3128e85b661a549ecfc427d57b88a 2024-12-08T05:51:17,528 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:17,528 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3ea3128e85b661a549ecfc427d57b88a 2024-12-08T05:51:17,528 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3ea3128e85b661a549ecfc427d57b88a 2024-12-08T05:51:17,530 INFO [StoreOpener-3ea3128e85b661a549ecfc427d57b88a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3ea3128e85b661a549ecfc427d57b88a 2024-12-08T05:51:17,533 INFO [StoreOpener-3ea3128e85b661a549ecfc427d57b88a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3ea3128e85b661a549ecfc427d57b88a columnFamilyName cf 2024-12-08T05:51:17,533 DEBUG [StoreOpener-3ea3128e85b661a549ecfc427d57b88a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:17,534 INFO [StoreOpener-3ea3128e85b661a549ecfc427d57b88a-1 {}] regionserver.HStore(327): Store=3ea3128e85b661a549ecfc427d57b88a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:17,534 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3ea3128e85b661a549ecfc427d57b88a 2024-12-08T05:51:17,535 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/default/TestHBaseWalOnEC/3ea3128e85b661a549ecfc427d57b88a 2024-12-08T05:51:17,536 DEBUG 
[RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/default/TestHBaseWalOnEC/3ea3128e85b661a549ecfc427d57b88a 2024-12-08T05:51:17,536 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3ea3128e85b661a549ecfc427d57b88a 2024-12-08T05:51:17,536 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3ea3128e85b661a549ecfc427d57b88a 2024-12-08T05:51:17,539 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3ea3128e85b661a549ecfc427d57b88a 2024-12-08T05:51:17,543 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/default/TestHBaseWalOnEC/3ea3128e85b661a549ecfc427d57b88a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:51:17,544 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3ea3128e85b661a549ecfc427d57b88a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60857541, jitterRate=-0.09315197169780731}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T05:51:17,544 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3ea3128e85b661a549ecfc427d57b88a 2024-12-08T05:51:17,545 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3ea3128e85b661a549ecfc427d57b88a: Running coprocessor pre-open hook at 1733637077529Writing region info on filesystem at 1733637077529Initializing all the Stores at 1733637077530 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637077530Cleaning up temporary data from old regions at 1733637077536 (+6 ms)Running coprocessor post-open hooks at 1733637077544 (+8 ms)Region opened successfully at 1733637077545 (+1 ms) 2024-12-08T05:51:17,547 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a., pid=6, masterSystemTime=1733637077521 2024-12-08T05:51:17,550 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 2024-12-08T05:51:17,550 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 
2024-12-08T05:51:17,552 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3ea3128e85b661a549ecfc427d57b88a, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,34051,1733637075001 2024-12-08T05:51:17,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3ea3128e85b661a549ecfc427d57b88a, server=0d942cb2025d,34051,1733637075001 because future has completed 2024-12-08T05:51:17,560 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T05:51:17,560 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3ea3128e85b661a549ecfc427d57b88a, server=0d942cb2025d,34051,1733637075001 in 189 msec 2024-12-08T05:51:17,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T05:51:17,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3ea3128e85b661a549ecfc427d57b88a, ASSIGN in 356 msec 2024-12-08T05:51:17,564 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T05:51:17,565 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733637077565"}]},"ts":"1733637077565"} 2024-12-08T05:51:17,567 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-08T05:51:17,569 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T05:51:17,572 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 446 msec 2024-12-08T05:51:17,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33841 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:51:17,766 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-08T05:51:17,766 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-08T05:51:17,768 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T05:51:17,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-08T05:51:17,773 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T05:51:17,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-12-08T05:51:17,781 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a., hostname=0d942cb2025d,34051,1733637075001, seqNum=2] 2024-12-08T05:51:17,790 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33841 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-08T05:51:17,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33841 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-08T05:51:17,797 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-08T05:51:17,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33841 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T05:51:17,799 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T05:51:17,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T05:51:17,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33841 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T05:51:17,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34051 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-08T05:51:17,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 2024-12-08T05:51:17,965 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 3ea3128e85b661a549ecfc427d57b88a 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-08T05:51:18,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/default/TestHBaseWalOnEC/3ea3128e85b661a549ecfc427d57b88a/.tmp/cf/a68928e371af4b62be04d8f69d6a50ad is 36, key is row/cf:cq/1733637077783/Put/seqid=0 2024-12-08T05:51:18,027 WARN [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-08T05:51:18,027 WARN [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,032 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1100420264_22 at /127.0.0.1:47870 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:32973:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47870 dst: /127.0.0.1:32973 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:18,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-08T05:51:18,038 WARN [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T05:51:18,038 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/default/TestHBaseWalOnEC/3ea3128e85b661a549ecfc427d57b88a/.tmp/cf/a68928e371af4b62be04d8f69d6a50ad 2024-12-08T05:51:18,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/default/TestHBaseWalOnEC/3ea3128e85b661a549ecfc427d57b88a/.tmp/cf/a68928e371af4b62be04d8f69d6a50ad as hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/default/TestHBaseWalOnEC/3ea3128e85b661a549ecfc427d57b88a/cf/a68928e371af4b62be04d8f69d6a50ad 2024-12-08T05:51:18,092 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/default/TestHBaseWalOnEC/3ea3128e85b661a549ecfc427d57b88a/cf/a68928e371af4b62be04d8f69d6a50ad, entries=1, sequenceid=5, filesize=4.7 K 2024-12-08T05:51:18,099 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 3ea3128e85b661a549ecfc427d57b88a in 133ms, sequenceid=5, compaction requested=false 2024-12-08T05:51:18,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-08T05:51:18,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 3ea3128e85b661a549ecfc427d57b88a: 2024-12-08T05:51:18,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 
2024-12-08T05:51:18,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-08T05:51:18,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33841 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-08T05:51:18,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-08T05:51:18,112 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 307 msec 2024-12-08T05:51:18,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 321 msec 2024-12-08T05:51:18,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33841 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T05:51:18,116 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-08T05:51:18,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T05:51:18,131 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T05:51:18,131 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:18,137 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:18,138 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:18,138 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T05:51:18,138 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T05:51:18,138 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=655794698, stopped=false 2024-12-08T05:51:18,139 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0d942cb2025d,33841,1733637074167 2024-12-08T05:51:18,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:18,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:18,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:18,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:18,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:18,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, 
quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:18,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:18,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:18,141 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:51:18,141 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T05:51:18,142 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:18,142 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:18,142 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:18,142 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,45547,1733637074845' ***** 2024-12-08T05:51:18,142 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:51:18,142 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:18,142 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:18,143 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:18,143 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:51:18,143 INFO [RS:0;0d942cb2025d:45547 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T05:51:18,143 INFO [RS:0;0d942cb2025d:45547 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T05:51:18,143 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,45547,1733637074845 2024-12-08T05:51:18,143 INFO [RS:0;0d942cb2025d:45547 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:51:18,144 INFO [RS:0;0d942cb2025d:45547 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0d942cb2025d:45547. 
2024-12-08T05:51:18,144 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,33005,1733637074950' ***** 2024-12-08T05:51:18,144 DEBUG [RS:0;0d942cb2025d:45547 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:18,144 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:51:18,144 DEBUG [RS:0;0d942cb2025d:45547 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:18,144 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:51:18,144 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,34051,1733637075001' ***** 2024-12-08T05:51:18,144 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:51:18,144 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:51:18,144 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.HRegionServer(976): stopping server 0d942cb2025d,45547,1733637074845; all regions closed. 2024-12-08T05:51:18,144 INFO [RS:1;0d942cb2025d:33005 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T05:51:18,144 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:51:18,144 INFO [RS:1;0d942cb2025d:33005 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T05:51:18,144 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,33005,1733637074950 2024-12-08T05:51:18,144 INFO [RS:1;0d942cb2025d:33005 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:51:18,144 INFO [RS:1;0d942cb2025d:33005 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;0d942cb2025d:33005. 
2024-12-08T05:51:18,145 DEBUG [RS:1;0d942cb2025d:33005 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:18,145 DEBUG [RS:1;0d942cb2025d:33005 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:18,145 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:51:18,145 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.HRegionServer(976): stopping server 0d942cb2025d,33005,1733637074950; all regions closed. 2024-12-08T05:51:18,145 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:51:18,145 INFO [RS:2;0d942cb2025d:34051 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T05:51:18,145 INFO [RS:2;0d942cb2025d:34051 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T05:51:18,145 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(3091): Received CLOSE for 3ea3128e85b661a549ecfc427d57b88a 2024-12-08T05:51:18,146 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,34051,1733637075001 2024-12-08T05:51:18,146 INFO [RS:2;0d942cb2025d:34051 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:51:18,146 INFO [RS:2;0d942cb2025d:34051 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;0d942cb2025d:34051. 
2024-12-08T05:51:18,146 DEBUG [RS:2;0d942cb2025d:34051 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:18,146 DEBUG [RS:2;0d942cb2025d:34051 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:18,146 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3ea3128e85b661a549ecfc427d57b88a, disabling compactions & flushes 2024-12-08T05:51:18,146 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T05:51:18,146 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 2024-12-08T05:51:18,146 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:51:18,146 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T05:51:18,146 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 2024-12-08T05:51:18,146 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. after waiting 0 ms 2024-12-08T05:51:18,147 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T05:51:18,147 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 
2024-12-08T05:51:18,148 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T05:51:18,148 DEBUG [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(1325): Online Regions={3ea3128e85b661a549ecfc427d57b88a=TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a., 1588230740=hbase:meta,,1.1588230740} 2024-12-08T05:51:18,148 DEBUG [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3ea3128e85b661a549ecfc427d57b88a 2024-12-08T05:51:18,148 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:51:18,149 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:51:18,149 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:51:18,149 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:51:18,149 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:51:18,149 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-08T05:51:18,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741826_1016 (size=93) 2024-12-08T05:51:18,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39331 is added to blk_1073741827_1017 (size=93) 2024-12-08T05:51:18,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741827_1017 (size=93) 2024-12-08T05:51:18,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39331 is added to blk_1073741826_1016 (size=93) 2024-12-08T05:51:18,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741827_1017 (size=93) 2024-12-08T05:51:18,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741826_1016 (size=93) 2024-12-08T05:51:18,158 DEBUG [RS:0;0d942cb2025d:45547 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/oldWALs 2024-12-08T05:51:18,158 INFO [RS:0;0d942cb2025d:45547 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 0d942cb2025d%2C45547%2C1733637074845:(num 1733637076307) 2024-12-08T05:51:18,158 DEBUG [RS:0;0d942cb2025d:45547 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:18,158 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:18,159 DEBUG [RS:1;0d942cb2025d:33005 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/oldWALs 2024-12-08T05:51:18,159 INFO [RS:1;0d942cb2025d:33005 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 
0d942cb2025d%2C33005%2C1733637074950:(num 1733637076307) 2024-12-08T05:51:18,159 INFO [RS:0;0d942cb2025d:45547 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:51:18,159 DEBUG [RS:1;0d942cb2025d:33005 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:18,159 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:18,159 INFO [RS:1;0d942cb2025d:33005 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:51:18,159 INFO [RS:1;0d942cb2025d:33005 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-08T05:51:18,159 INFO [RS:0;0d942cb2025d:45547 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-08T05:51:18,159 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T05:51:18,159 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T05:51:18,159 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:51:18,159 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T05:51:18,159 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:51:18,159 INFO [RS:1;0d942cb2025d:33005 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:51:18,159 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T05:51:18,159 INFO [RS:0;0d942cb2025d:45547 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:51:18,160 INFO [RS:1;0d942cb2025d:33005 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33005 2024-12-08T05:51:18,160 INFO [RS:0;0d942cb2025d:45547 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45547 2024-12-08T05:51:18,160 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T05:51:18,160 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T05:51:18,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,45547,1733637074845 2024-12-08T05:51:18,164 INFO [RS:0;0d942cb2025d:45547 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:51:18,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:51:18,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,33005,1733637074950 2024-12-08T05:51:18,165 INFO [RS:1;0d942cb2025d:33005 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:51:18,165 INFO [regionserver/0d942cb2025d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:18,167 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,33005,1733637074950] 2024-12-08T05:51:18,169 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,33005,1733637074950 already deleted, retry=false 2024-12-08T05:51:18,169 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,33005,1733637074950 expired; onlineServers=2 2024-12-08T05:51:18,169 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,45547,1733637074845] 2024-12-08T05:51:18,171 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,45547,1733637074845 already deleted, retry=false 2024-12-08T05:51:18,171 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,45547,1733637074845 expired; onlineServers=1 2024-12-08T05:51:18,178 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/default/TestHBaseWalOnEC/3ea3128e85b661a549ecfc427d57b88a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-08T05:51:18,181 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 2024-12-08T05:51:18,181 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3ea3128e85b661a549ecfc427d57b88a: Waiting for close lock at 1733637078146Running coprocessor pre-close hooks at 1733637078146Disabling compacts and flushes for region at 1733637078146Disabling writes for close at 1733637078146Writing region close event to WAL at 1733637078152 (+6 ms)Running coprocessor post-close hooks at 1733637078179 (+27 ms)Closed at 1733637078181 (+2 ms) 2024-12-08T05:51:18,181 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a. 
2024-12-08T05:51:18,191 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/.tmp/info/2caa307001db4edb95d224b30777ed9d is 153, key is TestHBaseWalOnEC,,1733637077116.3ea3128e85b661a549ecfc427d57b88a./info:regioninfo/1733637077551/Put/seqid=0 2024-12-08T05:51:18,194 WARN [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,194 WARN [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,198 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1100420264_22 at /127.0.0.1:36804 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:39331:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36804 dst: /127.0.0.1:39331 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:18,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39331 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-08T05:51:18,203 WARN [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
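A note on the recurring DFSStripedOutputStream warnings above (this is an observation drawn from the log, not part of the test output): RS-3-2-1024k stripes each block group into 3 data blocks plus 2 parity blocks, so fully placing a group needs 5 distinct datanodes, while the StartMiniClusterOption recorded later in this log shows numDataNodes=3 for this suite. That leaves the two parity blocks at indices 3 and 4 with nowhere to go, which matches the "failed to write 2 blocks" reported after each flush; the command the warning itself suggests, 'hdfs ec -verifyClusterSetup', would report the same shortfall. The snippet below is only an illustrative sketch of that arithmetic; the class name EcWidthCheck is invented for this note and does not appear in the test.

    // Illustrative only: the parity-block math behind the RS-3-2-1024k warnings.
    // Assumes the 3-datanode mini cluster recorded in the startup options below.
    public class EcWidthCheck {
        public static void main(String[] args) {
            int dataUnits = 3;     // "RS-3-2": 3 data units per block group
            int parityUnits = 2;   // and 2 parity units per block group
            int dataNodes = 3;     // numDataNodes=3 in StartMiniClusterOption
            int groupWidth = dataUnits + parityUnits;              // 5 distinct nodes needed
            int unplaceable = Math.max(0, groupWidth - dataNodes); // 2 blocks cannot be placed
            System.out.println("Blocks without a datanode: " + unplaceable); // prints 2
        }
    }

With only three datanodes the data units are still written, which is why each flush below completes despite the warnings, but the stripes carry no parity until more datanodes are available (or a narrower policy such as XOR-2-1-1024k is used).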
2024-12-08T05:51:18,203 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/.tmp/info/2caa307001db4edb95d224b30777ed9d 2024-12-08T05:51:18,230 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/.tmp/ns/4bdb0c4ff9004697acd7a61cc4bb8766 is 43, key is default/ns:d/1733637076943/Put/seqid=0 2024-12-08T05:51:18,233 WARN [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,233 WARN [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,237 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1100420264_22 at /127.0.0.1:36820 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:39331:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36820 dst: /127.0.0.1:39331 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:18,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39331 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-08T05:51:18,241 WARN [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T05:51:18,242 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/.tmp/ns/4bdb0c4ff9004697acd7a61cc4bb8766 2024-12-08T05:51:18,242 INFO [regionserver/0d942cb2025d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:18,243 INFO [regionserver/0d942cb2025d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:18,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:18,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45547-0x10190a054770001, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:18,268 INFO [RS:0;0d942cb2025d:45547 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:51:18,268 INFO [RS:0;0d942cb2025d:45547 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,45547,1733637074845; zookeeper connection closed. 2024-12-08T05:51:18,268 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/.tmp/table/16b62389cd414e4f8c8ef567ce37c384 is 52, key is TestHBaseWalOnEC/table:state/1733637077565/Put/seqid=0 2024-12-08T05:51:18,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:18,268 INFO [RS:1;0d942cb2025d:33005 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:51:18,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33005-0x10190a054770002, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:18,268 INFO [RS:1;0d942cb2025d:33005 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,33005,1733637074950; zookeeper connection closed. 2024-12-08T05:51:18,269 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1124b9e3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1124b9e3 2024-12-08T05:51:18,269 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1ff33a2b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1ff33a2b 2024-12-08T05:51:18,271 WARN [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,271 WARN [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. 
There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,277 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1100420264_22 at /127.0.0.1:47886 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:32973:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47886 dst: /127.0.0.1:32973 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:18,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-08T05:51:18,281 WARN [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T05:51:18,281 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/.tmp/table/16b62389cd414e4f8c8ef567ce37c384 2024-12-08T05:51:18,291 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/.tmp/info/2caa307001db4edb95d224b30777ed9d as hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/info/2caa307001db4edb95d224b30777ed9d 2024-12-08T05:51:18,299 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/info/2caa307001db4edb95d224b30777ed9d, entries=10, sequenceid=11, filesize=6.5 K 2024-12-08T05:51:18,301 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/.tmp/ns/4bdb0c4ff9004697acd7a61cc4bb8766 as hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/ns/4bdb0c4ff9004697acd7a61cc4bb8766 2024-12-08T05:51:18,310 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/ns/4bdb0c4ff9004697acd7a61cc4bb8766, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T05:51:18,312 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/.tmp/table/16b62389cd414e4f8c8ef567ce37c384 as hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/table/16b62389cd414e4f8c8ef567ce37c384 2024-12-08T05:51:18,321 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/table/16b62389cd414e4f8c8ef567ce37c384, entries=2, sequenceid=11, filesize=5.1 K 2024-12-08T05:51:18,323 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 173ms, sequenceid=11, compaction requested=false 2024-12-08T05:51:18,323 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T05:51:18,332 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, 
maxSeqId=1 2024-12-08T05:51:18,333 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:51:18,333 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:51:18,334 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637078148Running coprocessor pre-close hooks at 1733637078148Disabling compacts and flushes for region at 1733637078148Disabling writes for close at 1733637078149 (+1 ms)Obtaining lock to block concurrent updates at 1733637078149Preparing flush snapshotting stores in 1588230740 at 1733637078149Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733637078151 (+2 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733637078152 (+1 ms)Flushing 1588230740/info: creating writer at 1733637078152Flushing 1588230740/info: appending metadata at 1733637078187 (+35 ms)Flushing 1588230740/info: closing flushed file at 1733637078187Flushing 1588230740/ns: creating writer at 1733637078213 (+26 ms)Flushing 1588230740/ns: appending metadata at 1733637078229 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733637078229Flushing 1588230740/table: creating writer at 1733637078251 (+22 ms)Flushing 1588230740/table: appending metadata at 1733637078267 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733637078267Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3bdb9d9d: reopening flushed file at 1733637078290 (+23 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@269cf99c: reopening flushed file at 1733637078300 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@445fc90e: reopening flushed file at 1733637078311 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 173ms, sequenceid=11, compaction requested=false at 1733637078323 (+12 ms)Writing region close event to WAL at 1733637078325 (+2 ms)Running coprocessor post-close hooks at 1733637078333 (+8 ms)Closed at 1733637078333 2024-12-08T05:51:18,334 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T05:51:18,348 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(976): stopping server 0d942cb2025d,34051,1733637075001; all regions closed. 
2024-12-08T05:51:18,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741829_1019 (size=2751) 2024-12-08T05:51:18,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741829_1019 (size=2751) 2024-12-08T05:51:18,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39331 is added to blk_1073741829_1019 (size=2751) 2024-12-08T05:51:18,355 DEBUG [RS:2;0d942cb2025d:34051 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/oldWALs 2024-12-08T05:51:18,355 INFO [RS:2;0d942cb2025d:34051 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 0d942cb2025d%2C34051%2C1733637075001.meta:.meta(num 1733637076752) 2024-12-08T05:51:18,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741828_1018 (size=1298) 2024-12-08T05:51:18,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39331 is added to blk_1073741828_1018 (size=1298) 2024-12-08T05:51:18,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741828_1018 (size=1298) 2024-12-08T05:51:18,361 DEBUG [RS:2;0d942cb2025d:34051 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/oldWALs 2024-12-08T05:51:18,361 INFO [RS:2;0d942cb2025d:34051 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 0d942cb2025d%2C34051%2C1733637075001:(num 1733637076307) 2024-12-08T05:51:18,361 DEBUG [RS:2;0d942cb2025d:34051 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:18,361 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:18,361 INFO [RS:2;0d942cb2025d:34051 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:51:18,362 INFO [RS:2;0d942cb2025d:34051 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T05:51:18,362 INFO [RS:2;0d942cb2025d:34051 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:51:18,362 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T05:51:18,362 INFO [RS:2;0d942cb2025d:34051 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34051 2024-12-08T05:51:18,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:51:18,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,34051,1733637075001 2024-12-08T05:51:18,366 INFO [RS:2;0d942cb2025d:34051 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:51:18,367 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,34051,1733637075001] 2024-12-08T05:51:18,368 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,34051,1733637075001 already deleted, retry=false 2024-12-08T05:51:18,368 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,34051,1733637075001 expired; onlineServers=0 2024-12-08T05:51:18,369 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0d942cb2025d,33841,1733637074167' ***** 2024-12-08T05:51:18,369 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T05:51:18,369 INFO [M:0;0d942cb2025d:33841 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:51:18,369 INFO [M:0;0d942cb2025d:33841 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:51:18,369 DEBUG [M:0;0d942cb2025d:33841 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T05:51:18,369 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T05:51:18,369 DEBUG [M:0;0d942cb2025d:33841 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T05:51:18,369 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637075961 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637075961,5,FailOnTimeoutGroup] 2024-12-08T05:51:18,369 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637075960 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637075960,5,FailOnTimeoutGroup] 2024-12-08T05:51:18,370 INFO [M:0;0d942cb2025d:33841 {}] hbase.ChoreService(370): Chore service for: master/0d942cb2025d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T05:51:18,370 INFO [M:0;0d942cb2025d:33841 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:51:18,370 DEBUG [M:0;0d942cb2025d:33841 {}] master.HMaster(1795): Stopping service threads 2024-12-08T05:51:18,370 INFO [M:0;0d942cb2025d:33841 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T05:51:18,370 INFO [M:0;0d942cb2025d:33841 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:51:18,371 INFO [M:0;0d942cb2025d:33841 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T05:51:18,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T05:51:18,371 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T05:51:18,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:18,371 DEBUG [M:0;0d942cb2025d:33841 {}] zookeeper.ZKUtil(347): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T05:51:18,371 WARN [M:0;0d942cb2025d:33841 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T05:51:18,372 INFO [M:0;0d942cb2025d:33841 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/.lastflushedseqids 2024-12-08T05:51:18,381 WARN [M:0;0d942cb2025d:33841 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,381 WARN [M:0;0d942cb2025d:33841 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-08T05:51:18,384 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_598324605_22 at /127.0.0.1:47904 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:32973:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47904 dst: /127.0.0.1:32973 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:18,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-08T05:51:18,388 WARN [M:0;0d942cb2025d:33841 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T05:51:18,388 INFO [M:0;0d942cb2025d:33841 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T05:51:18,388 INFO [M:0;0d942cb2025d:33841 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T05:51:18,388 DEBUG [M:0;0d942cb2025d:33841 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:51:18,388 INFO [M:0;0d942cb2025d:33841 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:18,388 DEBUG [M:0;0d942cb2025d:33841 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:18,388 DEBUG [M:0;0d942cb2025d:33841 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:51:18,388 DEBUG [M:0;0d942cb2025d:33841 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T05:51:18,389 INFO [M:0;0d942cb2025d:33841 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-08T05:51:18,408 DEBUG [M:0;0d942cb2025d:33841 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b1e703496f244cc8a001049a974eeba2 is 82, key is hbase:meta,,1/info:regioninfo/1733637076840/Put/seqid=0 2024-12-08T05:51:18,410 WARN [M:0;0d942cb2025d:33841 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,410 WARN [M:0;0d942cb2025d:33841 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,413 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_598324605_22 at /127.0.0.1:58448 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:40739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58448 dst: /127.0.0.1:40739 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:18,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-08T05:51:18,417 WARN [M:0;0d942cb2025d:33841 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T05:51:18,418 INFO [M:0;0d942cb2025d:33841 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b1e703496f244cc8a001049a974eeba2 2024-12-08T05:51:18,446 DEBUG [M:0;0d942cb2025d:33841 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9bd358cd2bf847f5bd4fde2af741d55f is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733637077571/Put/seqid=0 2024-12-08T05:51:18,448 WARN [M:0;0d942cb2025d:33841 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,449 WARN [M:0;0d942cb2025d:33841 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,451 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_598324605_22 at /127.0.0.1:47924 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:32973:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47924 dst: /127.0.0.1:32973 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:18,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_-9223372036854775552_1037 (size=6440) 2024-12-08T05:51:18,456 WARN [M:0;0d942cb2025d:33841 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T05:51:18,457 INFO [M:0;0d942cb2025d:33841 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9bd358cd2bf847f5bd4fde2af741d55f 2024-12-08T05:51:18,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:18,468 INFO [RS:2;0d942cb2025d:34051 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:51:18,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34051-0x10190a054770003, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:18,468 INFO [RS:2;0d942cb2025d:34051 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,34051,1733637075001; zookeeper connection closed. 2024-12-08T05:51:18,468 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6fbe546a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6fbe546a 2024-12-08T05:51:18,469 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-08T05:51:18,485 DEBUG [M:0;0d942cb2025d:33841 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9d64816f9d0a46db82164f0577793845 is 69, key is 0d942cb2025d,33005,1733637074950/rs:state/1733637076053/Put/seqid=0 2024-12-08T05:51:18,487 WARN [M:0;0d942cb2025d:33841 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,487 WARN [M:0;0d942cb2025d:33841 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T05:51:18,489 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_598324605_22 at /127.0.0.1:58458 [Receiving block BP-1565006184-172.17.0.2-1733637071052:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:40739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58458 dst: /127.0.0.1:40739 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:18,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-08T05:51:18,494 WARN [M:0;0d942cb2025d:33841 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T05:51:18,494 INFO [M:0;0d942cb2025d:33841 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9d64816f9d0a46db82164f0577793845 2024-12-08T05:51:18,503 DEBUG [M:0;0d942cb2025d:33841 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b1e703496f244cc8a001049a974eeba2 as hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b1e703496f244cc8a001049a974eeba2 2024-12-08T05:51:18,510 INFO [M:0;0d942cb2025d:33841 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b1e703496f244cc8a001049a974eeba2, entries=8, sequenceid=72, filesize=5.5 K 2024-12-08T05:51:18,512 DEBUG [M:0;0d942cb2025d:33841 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9bd358cd2bf847f5bd4fde2af741d55f as hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9bd358cd2bf847f5bd4fde2af741d55f 2024-12-08T05:51:18,520 INFO [M:0;0d942cb2025d:33841 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9bd358cd2bf847f5bd4fde2af741d55f, entries=8, sequenceid=72, filesize=6.3 K 2024-12-08T05:51:18,521 DEBUG [M:0;0d942cb2025d:33841 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9d64816f9d0a46db82164f0577793845 as hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9d64816f9d0a46db82164f0577793845 2024-12-08T05:51:18,528 INFO [M:0;0d942cb2025d:33841 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9d64816f9d0a46db82164f0577793845, entries=3, sequenceid=72, filesize=5.2 K 2024-12-08T05:51:18,530 INFO [M:0;0d942cb2025d:33841 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=72, compaction requested=false 2024-12-08T05:51:18,531 INFO [M:0;0d942cb2025d:33841 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:18,531 DEBUG [M:0;0d942cb2025d:33841 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637078388Disabling compacts and flushes for region at 1733637078388Disabling writes for close at 1733637078388Obtaining lock to block concurrent updates at 1733637078389 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733637078389Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733637078389Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733637078390 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733637078390Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733637078407 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733637078407Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733637078426 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733637078445 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733637078445Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733637078465 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733637078484 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733637078484Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79842134: reopening flushed file at 1733637078501 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fc3047b: reopening flushed file at 1733637078510 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67c6dcef: reopening flushed file at 1733637078520 (+10 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=72, compaction requested=false at 1733637078530 (+10 ms)Writing region close event to WAL at 1733637078531 (+1 ms)Closed at 1733637078531 2024-12-08T05:51:18,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741825_1011 (size=32683) 2024-12-08T05:51:18,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741825_1011 (size=32683) 2024-12-08T05:51:18,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39331 is added to blk_1073741825_1011 (size=32683) 2024-12-08T05:51:18,536 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T05:51:18,536 INFO [M:0;0d942cb2025d:33841 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T05:51:18,536 INFO [M:0;0d942cb2025d:33841 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33841 2024-12-08T05:51:18,537 INFO [M:0;0d942cb2025d:33841 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:51:18,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:18,639 INFO [M:0;0d942cb2025d:33841 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:51:18,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33841-0x10190a054770000, quorum=127.0.0.1:59278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:18,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:18,646 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:18,646 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:18,646 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:18,646 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:18,649 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:51:18,649 WARN [BP-1565006184-172.17.0.2-1733637071052 heartbeating to localhost/127.0.0.1:43739 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:18,649 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:18,649 WARN [BP-1565006184-172.17.0.2-1733637071052 heartbeating to localhost/127.0.0.1:43739 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1565006184-172.17.0.2-1733637071052 (Datanode Uuid 4af3e1bd-b3db-4a80-ae2a-99cfff2bfba8) service to localhost/127.0.0.1:43739 2024-12-08T05:51:18,650 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/data/data5/current/BP-1565006184-172.17.0.2-1733637071052 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:18,650 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/data/data6/current/BP-1565006184-172.17.0.2-1733637071052 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:18,651 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:18,653 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:18,653 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:18,653 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:18,653 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:18,653 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:18,654 WARN [BP-1565006184-172.17.0.2-1733637071052 heartbeating to localhost/127.0.0.1:43739 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:18,654 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:51:18,654 WARN [BP-1565006184-172.17.0.2-1733637071052 heartbeating to localhost/127.0.0.1:43739 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1565006184-172.17.0.2-1733637071052 (Datanode Uuid 430f2c8c-be3c-4662-9599-8614d22bb597) service to localhost/127.0.0.1:43739 2024-12-08T05:51:18,654 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:18,655 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/data/data3/current/BP-1565006184-172.17.0.2-1733637071052 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:18,655 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/data/data4/current/BP-1565006184-172.17.0.2-1733637071052 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:18,655 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:18,657 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:18,658 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:18,658 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:18,658 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:18,658 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:18,659 WARN [BP-1565006184-172.17.0.2-1733637071052 heartbeating to localhost/127.0.0.1:43739 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:18,659 WARN [BP-1565006184-172.17.0.2-1733637071052 heartbeating to localhost/127.0.0.1:43739 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1565006184-172.17.0.2-1733637071052 (Datanode Uuid 1e205595-8b3c-4309-baa9-f9b51c68fce3) service to localhost/127.0.0.1:43739 2024-12-08T05:51:18,660 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/data/data1/current/BP-1565006184-172.17.0.2-1733637071052 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:18,660 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/cluster_2f457612-755b-caa9-be61-f401a75fa4bd/data/data2/current/BP-1565006184-172.17.0.2-1733637071052 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:18,660 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T05:51:18,660 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:18,660 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:18,669 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:51:18,669 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:18,669 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:18,669 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:18,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:18,678 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T05:51:18,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T05:51:18,712 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=86 (was 159), OpenFileDescriptor=439 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=156 (was 170), ProcessCount=11 (was 11), AvailableMemoryMB=7814 (was 8115) 2024-12-08T05:51:18,719 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=86, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=156, ProcessCount=11, AvailableMemoryMB=7814 2024-12-08T05:51:18,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T05:51:18,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/hadoop.log.dir so I do NOT create it in target/test-data/6fa84232-e713-5021-9152-c233061becb5 2024-12-08T05:51:18,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c55d7a9c-97fe-bc49-130e-4731f1cbd10f/hadoop.tmp.dir so I do NOT create it in target/test-data/6fa84232-e713-5021-9152-c233061becb5 2024-12-08T05:51:18,719 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee, deleteOnExit=true 2024-12-08T05:51:18,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T05:51:18,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/test.cache.data in system properties and HBase conf 2024-12-08T05:51:18,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T05:51:18,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/hadoop.log.dir in system properties and HBase conf 2024-12-08T05:51:18,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T05:51:18,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T05:51:18,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T05:51:18,720 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T05:51:18,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:51:18,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:51:18,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T05:51:18,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:51:18,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T05:51:18,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T05:51:18,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:51:18,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:51:18,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T05:51:18,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/nfs.dump.dir in system properties and HBase conf 2024-12-08T05:51:18,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/java.io.tmpdir in system properties and HBase conf 2024-12-08T05:51:18,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:51:18,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T05:51:18,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T05:51:18,815 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:18,820 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:18,821 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:18,821 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:18,821 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:51:18,822 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:18,822 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18ee857{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:18,823 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49b2b984{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:18,939 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4999c610{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/java.io.tmpdir/jetty-localhost-46709-hadoop-hdfs-3_4_1-tests_jar-_-any-4659545907721922948/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:51:18,940 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@18b4ebc7{HTTP/1.1, (http/1.1)}{localhost:46709} 2024-12-08T05:51:18,940 INFO [Time-limited test {}] server.Server(415): Started @10034ms 2024-12-08T05:51:19,042 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:19,046 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:19,047 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:19,047 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:19,047 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:51:19,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d1c78c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:19,048 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a6744cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:19,166 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5fbe2fc0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/java.io.tmpdir/jetty-localhost-37905-hadoop-hdfs-3_4_1-tests_jar-_-any-2757734340725750308/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:19,166 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@652901a4{HTTP/1.1, (http/1.1)}{localhost:37905} 2024-12-08T05:51:19,166 INFO [Time-limited test {}] server.Server(415): Started @10260ms 2024-12-08T05:51:19,168 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:51:19,222 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:19,225 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:19,226 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:19,226 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:19,226 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:51:19,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@564d8641{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:19,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69cb0b1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:19,306 WARN [Thread-502 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/data/data1/current/BP-909281240-172.17.0.2-1733637078752/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:19,306 WARN [Thread-503 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/data/data2/current/BP-909281240-172.17.0.2-1733637078752/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:19,325 WARN [Thread-481 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:51:19,328 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb0b40942a2a29f3e with lease ID 0xab8c81510567e306: Processing first storage report for DS-ad48f806-bdf5-4895-876f-eef7f1bc2a63 from datanode DatanodeRegistration(127.0.0.1:33209, datanodeUuid=b30de565-a784-4e5b-aa43-30e87f66dcf3, infoPort=44555, infoSecurePort=0, ipcPort=35237, storageInfo=lv=-57;cid=testClusterID;nsid=1943279025;c=1733637078752) 2024-12-08T05:51:19,328 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb0b40942a2a29f3e with lease ID 0xab8c81510567e306: from storage DS-ad48f806-bdf5-4895-876f-eef7f1bc2a63 node DatanodeRegistration(127.0.0.1:33209, datanodeUuid=b30de565-a784-4e5b-aa43-30e87f66dcf3, infoPort=44555, infoSecurePort=0, ipcPort=35237, storageInfo=lv=-57;cid=testClusterID;nsid=1943279025;c=1733637078752), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:19,328 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb0b40942a2a29f3e with lease ID 0xab8c81510567e306: Processing first storage report for DS-e682cb97-87ab-4b57-b0ef-af2b68a1ae9c from datanode DatanodeRegistration(127.0.0.1:33209, datanodeUuid=b30de565-a784-4e5b-aa43-30e87f66dcf3, infoPort=44555, infoSecurePort=0, ipcPort=35237, storageInfo=lv=-57;cid=testClusterID;nsid=1943279025;c=1733637078752) 2024-12-08T05:51:19,328 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb0b40942a2a29f3e with lease ID 0xab8c81510567e306: from storage DS-e682cb97-87ab-4b57-b0ef-af2b68a1ae9c node DatanodeRegistration(127.0.0.1:33209, datanodeUuid=b30de565-a784-4e5b-aa43-30e87f66dcf3, infoPort=44555, infoSecurePort=0, ipcPort=35237, storageInfo=lv=-57;cid=testClusterID;nsid=1943279025;c=1733637078752), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:19,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@31ac7b98{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/java.io.tmpdir/jetty-localhost-37461-hadoop-hdfs-3_4_1-tests_jar-_-any-14993011536597730391/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:19,344 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@284cd9af{HTTP/1.1, (http/1.1)}{localhost:37461} 2024-12-08T05:51:19,344 INFO [Time-limited test {}] server.Server(415): Started @10438ms 2024-12-08T05:51:19,346 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:51:19,377 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:19,380 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:19,382 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:19,382 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:19,382 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:51:19,382 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@328032c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:19,383 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7adc0795{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:19,461 WARN [Thread-537 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/data/data3/current/BP-909281240-172.17.0.2-1733637078752/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:19,462 WARN [Thread-538 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/data/data4/current/BP-909281240-172.17.0.2-1733637078752/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:19,479 WARN [Thread-517 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:51:19,483 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64a337e89da63c7c with lease ID 0xab8c81510567e307: Processing first storage report for DS-4658b901-4e74-4eeb-8ae4-bc76edf87309 from datanode DatanodeRegistration(127.0.0.1:46317, datanodeUuid=cc57298d-2bd5-42c8-8d5c-e752448197d9, infoPort=33125, infoSecurePort=0, ipcPort=40783, storageInfo=lv=-57;cid=testClusterID;nsid=1943279025;c=1733637078752) 2024-12-08T05:51:19,483 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64a337e89da63c7c with lease ID 0xab8c81510567e307: from storage DS-4658b901-4e74-4eeb-8ae4-bc76edf87309 node DatanodeRegistration(127.0.0.1:46317, datanodeUuid=cc57298d-2bd5-42c8-8d5c-e752448197d9, infoPort=33125, infoSecurePort=0, ipcPort=40783, storageInfo=lv=-57;cid=testClusterID;nsid=1943279025;c=1733637078752), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T05:51:19,483 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64a337e89da63c7c with lease ID 0xab8c81510567e307: Processing first storage report for DS-0d61f936-02d9-4a25-8001-1b5f738e8576 from datanode DatanodeRegistration(127.0.0.1:46317, datanodeUuid=cc57298d-2bd5-42c8-8d5c-e752448197d9, infoPort=33125, infoSecurePort=0, ipcPort=40783, storageInfo=lv=-57;cid=testClusterID;nsid=1943279025;c=1733637078752) 2024-12-08T05:51:19,483 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64a337e89da63c7c with lease ID 0xab8c81510567e307: from storage DS-0d61f936-02d9-4a25-8001-1b5f738e8576 node DatanodeRegistration(127.0.0.1:46317, datanodeUuid=cc57298d-2bd5-42c8-8d5c-e752448197d9, infoPort=33125, infoSecurePort=0, ipcPort=40783, storageInfo=lv=-57;cid=testClusterID;nsid=1943279025;c=1733637078752), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:19,501 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7aa48b42{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/java.io.tmpdir/jetty-localhost-45191-hadoop-hdfs-3_4_1-tests_jar-_-any-4376008345925100115/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:19,502 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4924c16d{HTTP/1.1, (http/1.1)}{localhost:45191} 2024-12-08T05:51:19,502 INFO [Time-limited test {}] server.Server(415): Started @10596ms 2024-12-08T05:51:19,503 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-08T05:51:19,599 WARN [Thread-563 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/data/data5/current/BP-909281240-172.17.0.2-1733637078752/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:19,599 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/data/data6/current/BP-909281240-172.17.0.2-1733637078752/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:19,622 WARN [Thread-552 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:51:19,625 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4a25b9005e206606 with lease ID 0xab8c81510567e308: Processing first storage report for DS-f0a1d281-5752-4eac-bda6-b0319e1e93cb from datanode DatanodeRegistration(127.0.0.1:36953, datanodeUuid=5c41820f-237d-41b5-81ad-29a090585a50, infoPort=45613, infoSecurePort=0, ipcPort=34503, storageInfo=lv=-57;cid=testClusterID;nsid=1943279025;c=1733637078752) 2024-12-08T05:51:19,625 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4a25b9005e206606 with lease ID 0xab8c81510567e308: from storage DS-f0a1d281-5752-4eac-bda6-b0319e1e93cb node DatanodeRegistration(127.0.0.1:36953, datanodeUuid=5c41820f-237d-41b5-81ad-29a090585a50, infoPort=45613, infoSecurePort=0, ipcPort=34503, storageInfo=lv=-57;cid=testClusterID;nsid=1943279025;c=1733637078752), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:19,626 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4a25b9005e206606 with lease ID 0xab8c81510567e308: Processing first storage report for DS-1f17a70a-c616-4f4c-b060-40e1c54a4c73 from datanode DatanodeRegistration(127.0.0.1:36953, datanodeUuid=5c41820f-237d-41b5-81ad-29a090585a50, infoPort=45613, infoSecurePort=0, ipcPort=34503, storageInfo=lv=-57;cid=testClusterID;nsid=1943279025;c=1733637078752) 2024-12-08T05:51:19,626 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4a25b9005e206606 with lease ID 0xab8c81510567e308: from storage DS-1f17a70a-c616-4f4c-b060-40e1c54a4c73 node DatanodeRegistration(127.0.0.1:36953, datanodeUuid=5c41820f-237d-41b5-81ad-29a090585a50, infoPort=45613, infoSecurePort=0, ipcPort=34503, storageInfo=lv=-57;cid=testClusterID;nsid=1943279025;c=1733637078752), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:19,629 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5 2024-12-08T05:51:19,632 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/zookeeper_0, clientPort=60357, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T05:51:19,633 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60357 2024-12-08T05:51:19,633 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:19,634 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:19,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:51:19,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:51:19,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:51:19,649 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b with version=8 2024-12-08T05:51:19,650 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43739/user/jenkins/test-data/701611d3-3b45-66bc-65ba-44de2053beb1/hbase-staging 2024-12-08T05:51:19,652 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:51:19,652 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:19,652 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:19,652 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:51:19,652 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:19,652 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:51:19,652 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T05:51:19,652 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:51:19,653 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35661 2024-12-08T05:51:19,655 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35661 connecting to ZooKeeper ensemble=127.0.0.1:60357 2024-12-08T05:51:19,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:356610x0, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:51:19,661 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35661-0x10190a06cdf0000 connected 2024-12-08T05:51:19,679 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:19,681 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:19,682 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:19,683 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b, hbase.cluster.distributed=false 2024-12-08T05:51:19,684 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:51:19,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35661 2024-12-08T05:51:19,687 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35661 2024-12-08T05:51:19,688 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35661 2024-12-08T05:51:19,689 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35661 2024-12-08T05:51:19,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35661 2024-12-08T05:51:19,706 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:51:19,706 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:19,706 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:19,707 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:51:19,707 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:19,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:51:19,707 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:51:19,707 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:51:19,708 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41211 2024-12-08T05:51:19,709 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41211 connecting to ZooKeeper ensemble=127.0.0.1:60357 2024-12-08T05:51:19,710 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:19,713 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:19,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412110x0, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:51:19,718 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:412110x0, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:19,718 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41211-0x10190a06cdf0001 connected 2024-12-08T05:51:19,718 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T05:51:19,719 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:51:19,719 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T05:51:19,720 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:51:19,721 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41211 2024-12-08T05:51:19,721 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41211 2024-12-08T05:51:19,721 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41211 2024-12-08T05:51:19,722 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41211 2024-12-08T05:51:19,722 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41211 2024-12-08T05:51:19,737 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:51:19,737 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:19,737 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:19,737 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:51:19,737 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:19,737 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:51:19,737 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:51:19,737 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:51:19,738 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46289 2024-12-08T05:51:19,739 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46289 connecting to ZooKeeper ensemble=127.0.0.1:60357 2024-12-08T05:51:19,740 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:19,742 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:19,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462890x0, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:51:19,746 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46289-0x10190a06cdf0002 connected 2024-12-08T05:51:19,746 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:19,747 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T05:51:19,747 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:51:19,748 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
2024-12-08T05:51:19,749 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:51:19,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46289 2024-12-08T05:51:19,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46289 2024-12-08T05:51:19,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46289 2024-12-08T05:51:19,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46289 2024-12-08T05:51:19,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46289 2024-12-08T05:51:19,767 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:51:19,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:19,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:19,768 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:51:19,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:19,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:51:19,768 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:51:19,768 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:51:19,769 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34711 2024-12-08T05:51:19,770 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34711 connecting to ZooKeeper ensemble=127.0.0.1:60357 2024-12-08T05:51:19,770 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:19,772 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:19,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:347110x0, quorum=127.0.0.1:60357, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:51:19,776 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:347110x0, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:19,777 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34711-0x10190a06cdf0003 connected 2024-12-08T05:51:19,777 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T05:51:19,777 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:51:19,778 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T05:51:19,779 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:51:19,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34711 2024-12-08T05:51:19,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34711 2024-12-08T05:51:19,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34711 2024-12-08T05:51:19,781 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34711 2024-12-08T05:51:19,781 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34711 2024-12-08T05:51:19,792 DEBUG [M:0;0d942cb2025d:35661 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0d942cb2025d:35661 2024-12-08T05:51:19,793 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0d942cb2025d,35661,1733637079651 2024-12-08T05:51:19,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:19,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:19,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:19,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:19,796 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0d942cb2025d,35661,1733637079651 2024-12-08T05:51:19,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T05:51:19,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T05:51:19,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T05:51:19,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,800 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T05:51:19,800 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0d942cb2025d,35661,1733637079651 from backup master directory 2024-12-08T05:51:19,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0d942cb2025d,35661,1733637079651 2024-12-08T05:51:19,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:19,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:19,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:19,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:19,802 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:51:19,802 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0d942cb2025d,35661,1733637079651 2024-12-08T05:51:19,809 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/hbase.id] with ID: 19a40033-d9fc-4df3-9fb1-c4686580a6de 2024-12-08T05:51:19,809 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/.tmp/hbase.id 2024-12-08T05:51:19,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:51:19,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:51:19,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:51:19,820 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/.tmp/hbase.id]:[hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/hbase.id] 2024-12-08T05:51:19,839 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:19,839 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T05:51:19,841 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-08T05:51:19,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:51:19,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:51:19,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:51:19,855 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:51:19,856 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T05:51:19,856 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:51:19,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is 
added to blk_1073741828_1004 (size=1189) 2024-12-08T05:51:19,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:51:19,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:51:19,869 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store 2024-12-08T05:51:19,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:51:19,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:51:19,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:51:19,879 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:19,879 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:51:19,879 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:19,879 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
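The master:store descriptor logged above lists four column families (info, proc, rs, state) with per-family versions, bloom filters, block sizes and encodings. An equivalent descriptor could be expressed with the HBase client builder API roughly as below; this is an illustrative reconstruction of two of the families from the logged attributes, not the code path HBase itself uses to create its internal master local region.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  static TableDescriptor build() {
    // 'info': 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks.
    ColumnFamilyDescriptorBuilder info =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024);
    // 'proc': 1 version, ROW bloom, no encoding, 64 KB blocks.
    ColumnFamilyDescriptorBuilder proc =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024);
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info.build())
        .setColumnFamily(proc.build())
        .build();
  }
}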
2024-12-08T05:51:19,879 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:51:19,879 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:19,879 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:19,879 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637079879Disabling compacts and flushes for region at 1733637079879Disabling writes for close at 1733637079879Writing region close event to WAL at 1733637079879Closed at 1733637079879 2024-12-08T05:51:19,880 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/.initializing 2024-12-08T05:51:19,880 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/WALs/0d942cb2025d,35661,1733637079651 2024-12-08T05:51:19,884 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C35661%2C1733637079651, suffix=, logDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/WALs/0d942cb2025d,35661,1733637079651, archiveDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/oldWALs, maxLogs=10 2024-12-08T05:51:19,885 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C35661%2C1733637079651.1733637079884 2024-12-08T05:51:19,895 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/WALs/0d942cb2025d,35661,1733637079651/0d942cb2025d%2C35661%2C1733637079651.1733637079884 2024-12-08T05:51:19,897 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44555:44555),(127.0.0.1/127.0.0.1:45613:45613),(127.0.0.1/127.0.0.1:33125:33125)] 2024-12-08T05:51:19,898 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:51:19,898 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:19,898 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:19,898 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:19,900 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:19,902 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T05:51:19,902 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:19,902 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:19,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:19,904 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T05:51:19,904 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:19,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:19,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:19,907 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T05:51:19,907 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:19,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:19,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:19,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T05:51:19,910 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:19,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:19,911 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:19,912 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:19,912 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:19,914 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:19,914 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:19,915 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T05:51:19,916 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:19,919 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:51:19,920 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74694642, jitterRate=0.11303690075874329}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T05:51:19,921 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733637079899Initializing all the Stores at 1733637079900 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637079900Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637079900Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637079900Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637079900Cleaning up temporary data from old regions at 1733637079914 (+14 ms)Region opened successfully at 1733637079921 (+7 ms) 2024-12-08T05:51:19,922 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T05:51:19,927 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dabfbf8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:51:19,928 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T05:51:19,928 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T05:51:19,928 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T05:51:19,929 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T05:51:19,929 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T05:51:19,930 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T05:51:19,930 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T05:51:19,932 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-08T05:51:19,933 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T05:51:19,936 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T05:51:19,936 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T05:51:19,937 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T05:51:19,938 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T05:51:19,939 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T05:51:19,940 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T05:51:19,941 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T05:51:19,942 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T05:51:19,943 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T05:51:19,945 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T05:51:19,948 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T05:51:19,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:19,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:19,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:19,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-08T05:51:19,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,950 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0d942cb2025d,35661,1733637079651, sessionid=0x10190a06cdf0000, setting cluster-up flag (Was=false) 2024-12-08T05:51:19,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,960 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T05:51:19,961 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,35661,1733637079651 2024-12-08T05:51:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:19,971 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T05:51:19,973 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,35661,1733637079651 2024-12-08T05:51:19,974 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T05:51:19,977 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:19,977 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T05:51:19,977 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
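The entries above show the master probing znodes such as /hbase/balancer and /hbase/switch/split ("node does not exist (not necessarily an error)") while the watchers on every process receive NodeCreated and NodeChildrenChanged events. The same check-and-watch pattern with the plain ZooKeeper client looks roughly like this; the quorum address is taken from the log and the callback simply prints events rather than driving any master logic.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeProbe {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Quorum address as logged; the watcher is reused for connection state and node events.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60357", 30_000, (WatchedEvent event) -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
      System.out.println("Event type=" + event.getType() + " path=" + event.getPath());
    });
    connected.await();
    // watch=true registers for a NodeCreated/NodeDeleted event on this path, mirroring the
    // "Unable to get data ... not necessarily an error" probes in the log above.
    Stat stat = zk.exists("/hbase/balancer", true);
    System.out.println("/hbase/balancer " + (stat == null ? "absent" : "present"));
    zk.close();
  }
}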
2024-12-08T05:51:19,977 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0d942cb2025d,35661,1733637079651 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T05:51:19,979 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:19,979 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:19,979 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:19,979 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:19,979 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0d942cb2025d:0, corePoolSize=10, maxPoolSize=10 2024-12-08T05:51:19,979 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:19,979 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:51:19,979 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:19,980 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733637109980 2024-12-08T05:51:19,981 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T05:51:19,981 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T05:51:19,981 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T05:51:19,981 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T05:51:19,981 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T05:51:19,981 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T05:51:19,981 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:19,981 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:19,981 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T05:51:19,981 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T05:51:19,982 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T05:51:19,982 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T05:51:19,983 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(746): ClusterId : 19a40033-d9fc-4df3-9fb1-c4686580a6de 2024-12-08T05:51:19,983 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:51:19,983 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.HRegionServer(746): ClusterId : 19a40033-d9fc-4df3-9fb1-c4686580a6de 2024-12-08T05:51:19,983 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:51:19,983 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:19,983 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T05:51:19,984 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T05:51:19,984 INFO 
[master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T05:51:19,986 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.HRegionServer(746): ClusterId : 19a40033-d9fc-4df3-9fb1-c4686580a6de 2024-12-08T05:51:19,986 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:51:19,986 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T05:51:19,986 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:51:19,987 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T05:51:19,987 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:51:19,988 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637079984,5,FailOnTimeoutGroup] 2024-12-08T05:51:19,989 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T05:51:19,989 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:51:19,989 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:51:19,989 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:51:19,990 DEBUG [RS:1;0d942cb2025d:46289 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78d1482c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:51:19,990 DEBUG [RS:0;0d942cb2025d:41211 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@418413d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:51:19,992 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637079988,5,FailOnTimeoutGroup] 2024-12-08T05:51:19,992 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:19,993 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
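The master wires its cleaners up as ScheduledChore instances run by a ChoreService (LogsCleaner and HFileCleaner every 600000 ms above), alongside fixed-size executor pools such as MASTER_OPEN_REGION with corePoolSize=5, maxPoolSize=5. Outside HBase the same periodic pattern can be sketched with java.util.concurrent; the cleaner body below is a placeholder, not the actual TimeToLiveLogCleaner logic.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
    // Period mirrors the LogsCleaner chore above: 600000 ms = 10 minutes.
    chorePool.scheduleAtFixedRate(
        () -> System.out.println("cleaner pass: would scan oldWALs and delete expired files"),
        0, 600_000, TimeUnit.MILLISECONDS);
    // The real ChoreService runs for the life of the server; this demo stops after one second.
    chorePool.schedule(chorePool::shutdown, 1, TimeUnit.SECONDS);
  }
}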
2024-12-08T05:51:19,993 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:51:19,993 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:19,993 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:19,993 DEBUG [RS:2;0d942cb2025d:34711 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69bd327d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:51:20,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:51:20,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:51:20,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:51:20,003 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T05:51:20,004 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b 2024-12-08T05:51:20,007 DEBUG [RS:1;0d942cb2025d:46289 {}] regionserver.ShutdownHook(81): Installed 
shutdown hook thread: Shutdownhook:RS:1;0d942cb2025d:46289 2024-12-08T05:51:20,007 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:51:20,007 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:51:20,007 DEBUG [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T05:51:20,008 DEBUG [RS:0;0d942cb2025d:41211 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0d942cb2025d:41211 2024-12-08T05:51:20,008 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:51:20,008 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:51:20,008 DEBUG [RS:0;0d942cb2025d:41211 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T05:51:20,009 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,35661,1733637079651 with port=46289, startcode=1733637079736 2024-12-08T05:51:20,009 DEBUG [RS:1;0d942cb2025d:46289 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:51:20,009 DEBUG [RS:2;0d942cb2025d:34711 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;0d942cb2025d:34711 2024-12-08T05:51:20,009 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:51:20,009 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:51:20,009 DEBUG [RS:2;0d942cb2025d:34711 {}] regionserver.HRegionServer(832): About to register with Master. 
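Each region server above installs a shutdown hook ("Installed shutdown hook thread: Shutdownhook:RS:1;...") before registering with the master via reportForDuty. The JVM-level mechanism behind that is Runtime.addShutdownHook; a minimal stand-in that releases a resource on exit, with the cleanup body left as a placeholder:

public class ShutdownHookSketch {
  public static void main(String[] args) throws InterruptedException {
    // Placeholder for the region server's cleanup (closing WALs, deregistering znodes, ...).
    Runnable cleanup = () -> System.out.println("shutdown hook: releasing resources");
    Runtime.getRuntime().addShutdownHook(new Thread(cleanup, "Shutdownhook:demo"));
    System.out.println("serving... (a normal exit or SIGTERM triggers the hook)");
    Thread.sleep(1_000);
  }
}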
2024-12-08T05:51:20,009 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,35661,1733637079651 with port=41211, startcode=1733637079706 2024-12-08T05:51:20,010 DEBUG [RS:0;0d942cb2025d:41211 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:51:20,010 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,35661,1733637079651 with port=34711, startcode=1733637079767 2024-12-08T05:51:20,010 DEBUG [RS:2;0d942cb2025d:34711 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:51:20,012 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37375, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:51:20,013 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35409, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:51:20,013 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35661 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,46289,1733637079736 2024-12-08T05:51:20,013 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35661 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,46289,1733637079736 2024-12-08T05:51:20,013 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56199, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:51:20,015 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35661 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,41211,1733637079706 2024-12-08T05:51:20,015 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35661 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,41211,1733637079706 2024-12-08T05:51:20,016 DEBUG [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b 2024-12-08T05:51:20,016 DEBUG [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40721 2024-12-08T05:51:20,016 DEBUG [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T05:51:20,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:51:20,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:51:20,018 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35661 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,34711,1733637079767 2024-12-08T05:51:20,018 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35661 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,34711,1733637079767 2024-12-08T05:51:20,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, 
quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:51:20,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:51:20,019 DEBUG [RS:1;0d942cb2025d:46289 {}] zookeeper.ZKUtil(111): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,46289,1733637079736 2024-12-08T05:51:20,019 WARN [RS:1;0d942cb2025d:46289 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:51:20,019 INFO [RS:1;0d942cb2025d:46289 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:51:20,019 DEBUG [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/WALs/0d942cb2025d,46289,1733637079736 2024-12-08T05:51:20,019 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:20,021 DEBUG [RS:2;0d942cb2025d:34711 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b 2024-12-08T05:51:20,021 DEBUG [RS:2;0d942cb2025d:34711 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40721 2024-12-08T05:51:20,021 DEBUG [RS:2;0d942cb2025d:34711 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T05:51:20,021 DEBUG [RS:0;0d942cb2025d:41211 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b 2024-12-08T05:51:20,021 DEBUG [RS:0;0d942cb2025d:41211 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40721 2024-12-08T05:51:20,021 DEBUG [RS:0;0d942cb2025d:41211 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T05:51:20,021 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,46289,1733637079736] 2024-12-08T05:51:20,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:51:20,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:51:20,027 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:51:20,027 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:20,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:20,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:51:20,028 DEBUG [RS:2;0d942cb2025d:34711 {}] zookeeper.ZKUtil(111): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,34711,1733637079767 2024-12-08T05:51:20,028 WARN [RS:2;0d942cb2025d:34711 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:51:20,028 INFO [RS:2;0d942cb2025d:34711 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:51:20,028 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,41211,1733637079706] 2024-12-08T05:51:20,028 DEBUG [RS:2;0d942cb2025d:34711 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/WALs/0d942cb2025d,34711,1733637079767 2024-12-08T05:51:20,028 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,34711,1733637079767] 2024-12-08T05:51:20,028 DEBUG [RS:0;0d942cb2025d:41211 {}] zookeeper.ZKUtil(111): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,41211,1733637079706 2024-12-08T05:51:20,028 WARN [RS:0;0d942cb2025d:41211 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
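Registration above is ZooKeeper-based: each region server keeps a watcher on its own znode under /hbase/rs, and the master's RegionServerTracker reacts to NodeChildrenChanged on /hbase/rs by adding the new server to its list. A hedged sketch of that ephemeral-node-plus-children-watch pattern with the plain ZooKeeper API; connection handling is omitted and the znode name is illustrative.

import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class RsRegistrationSketch {
  // zk is assumed to be an already-connected session (see the ZnodeProbe sketch earlier).
  static void register(ZooKeeper zk, String serverName) throws Exception {
    // Ephemeral: the znode disappears automatically if this session dies,
    // which is what lets the master notice a crashed region server.
    zk.create("/hbase/rs/" + serverName, new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
  }

  static void trackServers(ZooKeeper zk) throws Exception {
    // watch=true re-arms a NodeChildrenChanged watch on /hbase/rs,
    // the event delivered to master:35661 in the log above.
    List<String> servers = zk.getChildren("/hbase/rs", true);
    System.out.println("live region servers: " + servers);
  }
}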
2024-12-08T05:51:20,029 INFO [RS:0;0d942cb2025d:41211 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:51:20,029 DEBUG [RS:0;0d942cb2025d:41211 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/WALs/0d942cb2025d,41211,1733637079706 2024-12-08T05:51:20,029 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:51:20,030 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:51:20,031 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:20,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:20,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:51:20,032 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:51:20,032 INFO [RS:1;0d942cb2025d:46289 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:51:20,033 INFO [RS:1;0d942cb2025d:46289 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
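The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. the lower mark is 95% of the upper limit (880 x 0.95 = 836). A 0.95 fraction matches my recollection of the default for hbase.regionserver.global.memstore.size.lower.limit, but that key and default are quoted from memory rather than from this test's configuration. A two-line check of the arithmetic:

public class MemStoreMarks {
  public static void main(String[] args) {
    long globalLimitMb = 880;          // globalMemStoreLimit from the log
    double lowerLimitFraction = 0.95;  // assumed lower-limit fraction (verify against hbase-default.xml)
    System.out.println("low mark = " + Math.round(globalLimitMb * lowerLimitFraction) + " M"); // prints 836 M
  }
}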
2024-12-08T05:51:20,033 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:51:20,033 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:51:20,033 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:20,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:20,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:51:20,034 INFO [RS:1;0d942cb2025d:46289 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:51:20,034 INFO [RS:1;0d942cb2025d:46289 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
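The ChoreService(168) lines register the region server's periodic maintenance tasks (CompactionChecker every 1000 ms, MemstoreFlusherChore, CompactedHFilesCleaner, and so on). ChoreService and ScheduledChore are internal HBase classes, so the following is only a sketch of the scheduling pattern the log shows; the chore name and period here are illustrative, not taken from the log:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {           // trivial stopper for the sketch
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("sketch");
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) { // period in ms
      @Override protected void chore() {
        System.out.println("periodic work, analogous to CompactionChecker running every PT1S");
      }
    };
    service.scheduleChore(chore);
    Thread.sleep(3500);     // let the chore fire a few times
    service.shutdown();
  }
}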
2024-12-08T05:51:20,035 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,035 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,035 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,035 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,035 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,035 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:51:20,035 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,035 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,036 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,036 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,036 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,036 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,036 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:20,036 DEBUG [RS:1;0d942cb2025d:46289 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:20,036 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 
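The executor.ExecutorService(95) lines enumerate the fixed-size handler pools each region server starts (for example RS_OPEN_REGION with corePoolSize=1 and RS_SNAPSHOT_OPERATIONS with corePoolSize=3). The underlying pattern is an ordinary bounded thread pool; a JDK-only sketch of the same core/max sizing, with illustrative task names not taken from the log:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class HandlerPoolSketch {
  public static void main(String[] args) {
    // corePoolSize=3, maxPoolSize=3, mirroring the RS_SNAPSHOT_OPERATIONS pool in the log
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        3, 3, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    for (int i = 0; i < 5; i++) {
      final int task = i;
      pool.execute(() -> System.out.println(
          "handler task " + task + " on " + Thread.currentThread().getName()));
    }
    pool.shutdown();
  }
}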
2024-12-08T05:51:20,036 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:20,037 INFO [RS:1;0d942cb2025d:46289 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,037 INFO [RS:1;0d942cb2025d:46289 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,037 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:51:20,037 INFO [RS:1;0d942cb2025d:46289 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,037 INFO [RS:1;0d942cb2025d:46289 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,037 INFO [RS:1;0d942cb2025d:46289 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,037 INFO [RS:1;0d942cb2025d:46289 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,46289,1733637079736-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:51:20,039 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:51:20,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:20,039 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:51:20,040 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740 2024-12-08T05:51:20,040 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740 2024-12-08T05:51:20,041 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:51:20,041 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:51:20,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:51:20,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:51:20,043 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
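The MemStoreFlusher(131) lines report globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. the low mark is roughly 95% of the limit. Assuming the standard global memstore sizing keys, a sketch of the two knobs behind those numbers:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region server heap usable by all memstores.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of the limit above; 0.95 * 880 M is roughly 836 M.
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    float upper = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);
    float lower = conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    System.out.println("low-water mark = " + (upper * lower) + " of heap");
  }
}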
2024-12-08T05:51:20,044 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:51:20,044 INFO [RS:0;0d942cb2025d:41211 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:51:20,044 INFO [RS:0;0d942cb2025d:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,048 INFO [RS:2;0d942cb2025d:34711 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:51:20,048 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:51:20,048 INFO [RS:2;0d942cb2025d:34711 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,049 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:51:20,049 INFO [RS:0;0d942cb2025d:41211 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:51:20,049 INFO [RS:0;0d942cb2025d:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,049 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,049 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,049 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,049 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72536318, jitterRate=0.08087536692619324}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T05:51:20,050 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,050 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,050 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:51:20,050 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,050 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,050 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733637080020Initializing all the Stores at 1733637080021 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637080022 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637080024 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637080024Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637080024Cleaning up temporary data from old regions at 1733637080042 (+18 ms)Region opened successfully at 1733637080050 (+8 ms) 2024-12-08T05:51:20,050 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:51:20,050 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,050 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:51:20,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:51:20,050 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:51:20,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:51:20,050 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:20,051 DEBUG [RS:0;0d942cb2025d:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:20,053 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:51:20,054 INFO [RS:2;0d942cb2025d:34711 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:51:20,054 INFO [RS:2;0d942cb2025d:34711 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,054 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:51:20,054 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637080050Disabling compacts and flushes for region at 1733637080050Disabling writes for close at 1733637080050Writing region close event to WAL at 1733637080054 (+4 ms)Closed at 1733637080054 2024-12-08T05:51:20,054 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,055 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,055 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,055 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,055 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,055 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:51:20,055 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,055 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,055 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,055 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,055 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,055 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, 
corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:20,055 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:20,056 DEBUG [RS:2;0d942cb2025d:34711 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:20,056 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:20,056 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T05:51:20,057 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T05:51:20,058 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:51:20,061 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T05:51:20,061 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:51:20,061 INFO [RS:1;0d942cb2025d:46289 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,46289,1733637079736-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,062 INFO [RS:1;0d942cb2025d:46289 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,062 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.Replication(171): 0d942cb2025d,46289,1733637079736 started 2024-12-08T05:51:20,067 INFO [RS:0;0d942cb2025d:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,067 INFO [RS:0;0d942cb2025d:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,067 INFO [RS:0;0d942cb2025d:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,067 INFO [RS:2;0d942cb2025d:34711 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,067 INFO [RS:0;0d942cb2025d:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,067 INFO [RS:0;0d942cb2025d:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,067 INFO [RS:2;0d942cb2025d:34711 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-12-08T05:51:20,067 INFO [RS:0;0d942cb2025d:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,41211,1733637079706-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:51:20,067 INFO [RS:2;0d942cb2025d:34711 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,067 INFO [RS:2;0d942cb2025d:34711 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,068 INFO [RS:2;0d942cb2025d:34711 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,068 INFO [RS:2;0d942cb2025d:34711 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,34711,1733637079767-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:51:20,082 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:51:20,082 INFO [RS:0;0d942cb2025d:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,41211,1733637079706-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,083 INFO [RS:0;0d942cb2025d:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,083 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.Replication(171): 0d942cb2025d,41211,1733637079706 started 2024-12-08T05:51:20,084 INFO [RS:1;0d942cb2025d:46289 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,084 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,46289,1733637079736, RpcServer on 0d942cb2025d/172.17.0.2:46289, sessionid=0x10190a06cdf0002 2024-12-08T05:51:20,084 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:51:20,084 DEBUG [RS:1;0d942cb2025d:46289 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,46289,1733637079736 2024-12-08T05:51:20,084 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,46289,1733637079736' 2024-12-08T05:51:20,084 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:51:20,085 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:51:20,086 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:51:20,086 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:51:20,086 DEBUG [RS:1;0d942cb2025d:46289 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d942cb2025d,46289,1733637079736 2024-12-08T05:51:20,086 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,46289,1733637079736' 2024-12-08T05:51:20,086 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 
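The flush-table-proc and online-snapshot procedure members that each region server starts here are the server side of Admin-driven flushes and snapshots. A minimal client-side sketch that would exercise them once the cluster is up; the table and snapshot names are illustrative, not from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("example");   // illustrative table name
      admin.flush(table);                                // handled by the flush-table-proc member
      admin.snapshot("example-snap", table);             // handled by the online-snapshot member
    }
  }
}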
2024-12-08T05:51:20,086 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T05:51:20,087 DEBUG [RS:1;0d942cb2025d:46289 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T05:51:20,087 INFO [RS:1;0d942cb2025d:46289 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T05:51:20,087 INFO [RS:1;0d942cb2025d:46289 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T05:51:20,088 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:51:20,088 INFO [RS:2;0d942cb2025d:34711 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,34711,1733637079767-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,088 INFO [RS:2;0d942cb2025d:34711 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,088 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.Replication(171): 0d942cb2025d,34711,1733637079767 started 2024-12-08T05:51:20,097 INFO [RS:0;0d942cb2025d:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,097 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,41211,1733637079706, RpcServer on 0d942cb2025d/172.17.0.2:41211, sessionid=0x10190a06cdf0001 2024-12-08T05:51:20,097 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:51:20,097 DEBUG [RS:0;0d942cb2025d:41211 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,41211,1733637079706 2024-12-08T05:51:20,097 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,41211,1733637079706' 2024-12-08T05:51:20,097 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:51:20,097 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:51:20,098 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:51:20,098 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:51:20,098 DEBUG [RS:0;0d942cb2025d:41211 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d942cb2025d,41211,1733637079706 2024-12-08T05:51:20,098 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,41211,1733637079706' 2024-12-08T05:51:20,098 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T05:51:20,098 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T05:51:20,099 DEBUG [RS:0;0d942cb2025d:41211 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T05:51:20,099 
INFO [RS:0;0d942cb2025d:41211 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T05:51:20,099 INFO [RS:0;0d942cb2025d:41211 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T05:51:20,103 INFO [RS:2;0d942cb2025d:34711 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,103 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,34711,1733637079767, RpcServer on 0d942cb2025d/172.17.0.2:34711, sessionid=0x10190a06cdf0003 2024-12-08T05:51:20,103 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:51:20,103 DEBUG [RS:2;0d942cb2025d:34711 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,34711,1733637079767 2024-12-08T05:51:20,103 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,34711,1733637079767' 2024-12-08T05:51:20,103 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:51:20,104 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:51:20,104 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:51:20,104 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:51:20,105 DEBUG [RS:2;0d942cb2025d:34711 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d942cb2025d,34711,1733637079767 2024-12-08T05:51:20,105 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,34711,1733637079767' 2024-12-08T05:51:20,105 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T05:51:20,105 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T05:51:20,105 DEBUG [RS:2;0d942cb2025d:34711 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T05:51:20,105 INFO [RS:2;0d942cb2025d:34711 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T05:51:20,105 INFO [RS:2;0d942cb2025d:34711 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
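All three region servers report "Quota support disabled", which is the out-of-the-box behaviour. Assuming the standard switch, enabling RPC and space quotas is a single boolean key:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaSwitchSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.quota.enabled", true);  // default is false, hence the messages above
    System.out.println("quotas enabled = " + conf.getBoolean("hbase.quota.enabled", false));
  }
}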
2024-12-08T05:51:20,190 INFO [RS:1;0d942cb2025d:46289 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C46289%2C1733637079736, suffix=, logDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/WALs/0d942cb2025d,46289,1733637079736, archiveDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/oldWALs, maxLogs=32 2024-12-08T05:51:20,191 INFO [RS:1;0d942cb2025d:46289 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C46289%2C1733637079736.1733637080191 2024-12-08T05:51:20,200 INFO [RS:1;0d942cb2025d:46289 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/WALs/0d942cb2025d,46289,1733637079736/0d942cb2025d%2C46289%2C1733637079736.1733637080191 2024-12-08T05:51:20,201 DEBUG [RS:1;0d942cb2025d:46289 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44555:44555),(127.0.0.1/127.0.0.1:45613:45613),(127.0.0.1/127.0.0.1:33125:33125)] 2024-12-08T05:51:20,201 INFO [RS:0;0d942cb2025d:41211 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C41211%2C1733637079706, suffix=, logDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/WALs/0d942cb2025d,41211,1733637079706, archiveDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/oldWALs, maxLogs=32 2024-12-08T05:51:20,203 INFO [RS:0;0d942cb2025d:41211 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C41211%2C1733637079706.1733637080203 2024-12-08T05:51:20,208 INFO [RS:2;0d942cb2025d:34711 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C34711%2C1733637079767, suffix=, logDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/WALs/0d942cb2025d,34711,1733637079767, archiveDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/oldWALs, maxLogs=32 2024-12-08T05:51:20,209 INFO [RS:2;0d942cb2025d:34711 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C34711%2C1733637079767.1733637080209 2024-12-08T05:51:20,210 INFO [RS:0;0d942cb2025d:41211 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/WALs/0d942cb2025d,41211,1733637079706/0d942cb2025d%2C41211%2C1733637079706.1733637080203 2024-12-08T05:51:20,211 DEBUG [0d942cb2025d:35661 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T05:51:20,212 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0d942cb2025d,46289,1733637079736 2024-12-08T05:51:20,212 DEBUG [RS:0;0d942cb2025d:41211 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44555:44555),(127.0.0.1/127.0.0.1:33125:33125),(127.0.0.1/127.0.0.1:45613:45613)] 2024-12-08T05:51:20,214 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,46289,1733637079736, state=OPENING 2024-12-08T05:51:20,216 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T05:51:20,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:20,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:20,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:20,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:20,218 INFO [RS:2;0d942cb2025d:34711 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/WALs/0d942cb2025d,34711,1733637079767/0d942cb2025d%2C34711%2C1733637079767.1733637080209 2024-12-08T05:51:20,219 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:20,219 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:20,219 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:20,219 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:20,219 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:51:20,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,46289,1733637079736}] 2024-12-08T05:51:20,225 DEBUG [RS:2;0d942cb2025d:34711 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44555:44555),(127.0.0.1/127.0.0.1:45613:45613),(127.0.0.1/127.0.0.1:33125:33125)] 2024-12-08T05:51:20,373 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T05:51:20,375 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37095, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T05:51:20,380 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T05:51:20,381 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:51:20,383 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C46289%2C1733637079736.meta, suffix=.meta, 
logDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/WALs/0d942cb2025d,46289,1733637079736, archiveDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/oldWALs, maxLogs=32 2024-12-08T05:51:20,384 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C46289%2C1733637079736.meta.1733637080384.meta 2024-12-08T05:51:20,392 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/WALs/0d942cb2025d,46289,1733637079736/0d942cb2025d%2C46289%2C1733637079736.meta.1733637080384.meta 2024-12-08T05:51:20,396 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45613:45613),(127.0.0.1/127.0.0.1:44555:44555),(127.0.0.1/127.0.0.1:33125:33125)] 2024-12-08T05:51:20,397 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:51:20,397 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T05:51:20,397 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T05:51:20,397 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
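The AbstractFSWAL(613) lines report blocksize=256 MB, rollsize=128 MB and maxLogs=32 for both the regular and the meta WAL; the roll size is the block size times a roll multiplier. Assuming the usual keys behind those numbers, a sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size: 256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at 50% => 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32 as in the log
    long blocksize = conf.getLong("hbase.regionserver.hlog.blocksize", 0L);
    float mult = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    System.out.println("roll size = " + (long) (blocksize * mult) + " bytes");
  }
}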
2024-12-08T05:51:20,397 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T05:51:20,398 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:20,398 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T05:51:20,398 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T05:51:20,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:51:20,401 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:51:20,401 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:20,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:20,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:51:20,402 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:51:20,403 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:20,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:20,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:51:20,404 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:51:20,404 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:20,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:20,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:51:20,406 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:51:20,406 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:20,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
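Each store of hbase:meta is opened with the column-family attributes echoed in the region open journal below (ROW_INDEX_V1 data-block encoding, ROWCOL bloom filter, IN_MEMORY, 8 KB blocks, 3 versions). A sketch of declaring an equivalent family on a user table with the public descriptor builders; the table name "example" is illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyDescriptorSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8192)                                    // BLOCKSIZE => 8 KB
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .build();
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example"))
        .setColumnFamily(info)
        .build();
    System.out.println(table);
  }
}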
2024-12-08T05:51:20,407 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:51:20,407 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740 2024-12-08T05:51:20,409 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740 2024-12-08T05:51:20,411 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:51:20,411 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:51:20,411 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T05:51:20,413 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:51:20,414 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65457885, jitterRate=-0.02460150420665741}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T05:51:20,414 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T05:51:20,415 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733637080398Writing region info on filesystem at 1733637080398Initializing all the Stores at 1733637080399 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637080399Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637080399Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637080399Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637080399Cleaning up temporary data from old regions at 1733637080411 (+12 ms)Running coprocessor post-open hooks at 1733637080414 (+3 ms)Region opened successfully at 1733637080415 (+1 ms) 2024-12-08T05:51:20,417 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733637080373 2024-12-08T05:51:20,420 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T05:51:20,420 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T05:51:20,421 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,46289,1733637079736 2024-12-08T05:51:20,422 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,46289,1733637079736, state=OPEN 2024-12-08T05:51:20,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:51:20,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:51:20,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:51:20,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:51:20,424 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:20,424 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:20,424 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0d942cb2025d,46289,1733637079736 2024-12-08T05:51:20,424 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:20,424 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:20,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T05:51:20,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,46289,1733637079736 in 205 msec 2024-12-08T05:51:20,432 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T05:51:20,432 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 372 msec 2024-12-08T05:51:20,433 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:20,433 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T05:51:20,435 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:51:20,435 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,46289,1733637079736, seqNum=-1] 2024-12-08T05:51:20,435 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:51:20,437 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34995, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:51:20,444 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 468 msec 2024-12-08T05:51:20,445 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733637080444, completionTime=-1 2024-12-08T05:51:20,445 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-08T05:51:20,445 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
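The ConnectionUtils lines above fetch the hbase:meta region location from the connection registry before the namespaces are created. The same lookup is available to any client through a RegionLocator; a minimal sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // First (and only) region of hbase:meta, analogous to the location printed above.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), false);
      System.out.println("hbase:meta is on " + loc.getServerName());
    }
  }
}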
2024-12-08T05:51:20,447 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-08T05:51:20,447 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733637140447 2024-12-08T05:51:20,447 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733637200447 2024-12-08T05:51:20,447 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-08T05:51:20,448 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,35661,1733637079651-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,448 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,35661,1733637079651-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,448 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,35661,1733637079651-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,448 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0d942cb2025d:35661, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,448 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,448 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,451 DEBUG [master/0d942cb2025d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T05:51:20,453 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.651sec 2024-12-08T05:51:20,453 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T05:51:20,454 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T05:51:20,454 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T05:51:20,454 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T05:51:20,454 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T05:51:20,454 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,35661,1733637079651-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-08T05:51:20,454 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,35661,1733637079651-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T05:51:20,457 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T05:51:20,457 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T05:51:20,457 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,35661,1733637079651-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:20,485 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f3562bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:51:20,485 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0d942cb2025d,35661,-1 for getting cluster id 2024-12-08T05:51:20,485 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T05:51:20,487 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '19a40033-d9fc-4df3-9fb1-c4686580a6de' 2024-12-08T05:51:20,487 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T05:51:20,487 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "19a40033-d9fc-4df3-9fb1-c4686580a6de" 2024-12-08T05:51:20,488 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f29724, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:51:20,488 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0d942cb2025d,35661,-1] 2024-12-08T05:51:20,488 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T05:51:20,488 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:20,490 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33830, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T05:51:20,491 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61e12713, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:51:20,491 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:51:20,492 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,46289,1733637079736, seqNum=-1] 2024-12-08T05:51:20,493 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:51:20,495 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56428, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:51:20,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0d942cb2025d,35661,1733637079651 2024-12-08T05:51:20,498 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T05:51:20,499 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 0d942cb2025d,35661,1733637079651 2024-12-08T05:51:20,499 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2e322aa9 2024-12-08T05:51:20,499 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T05:51:20,500 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33846, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T05:51:20,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35661 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:51:20,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35661 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-08T05:51:20,505 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T05:51:20,505 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:20,505 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35661 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-08T05:51:20,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:51:20,507 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T05:51:20,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741837_1013 (size=392) 
2024-12-08T05:51:20,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741837_1013 (size=392) 2024-12-08T05:51:20,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741837_1013 (size=392) 2024-12-08T05:51:20,523 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c8bd091d55a09f9ee467f5e8713aaacf, NAME => 'TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b 2024-12-08T05:51:20,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741838_1014 (size=51) 2024-12-08T05:51:20,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741838_1014 (size=51) 2024-12-08T05:51:20,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741838_1014 (size=51) 2024-12-08T05:51:20,537 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:20,537 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing c8bd091d55a09f9ee467f5e8713aaacf, disabling compactions & flushes 2024-12-08T05:51:20,537 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 2024-12-08T05:51:20,537 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 2024-12-08T05:51:20,538 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. after waiting 0 ms 2024-12-08T05:51:20,538 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 2024-12-08T05:51:20,538 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 
2024-12-08T05:51:20,538 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for c8bd091d55a09f9ee467f5e8713aaacf: Waiting for close lock at 1733637080537Disabling compacts and flushes for region at 1733637080537Disabling writes for close at 1733637080538 (+1 ms)Writing region close event to WAL at 1733637080538Closed at 1733637080538 2024-12-08T05:51:20,540 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T05:51:20,540 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733637080540"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733637080540"}]},"ts":"1733637080540"} 2024-12-08T05:51:20,544 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-08T05:51:20,546 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T05:51:20,546 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733637080546"}]},"ts":"1733637080546"} 2024-12-08T05:51:20,548 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-08T05:51:20,549 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {0d942cb2025d=0} racks are {/default-rack=0} 2024-12-08T05:51:20,550 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-08T05:51:20,550 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-08T05:51:20,550 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-08T05:51:20,550 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-08T05:51:20,550 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-08T05:51:20,550 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-08T05:51:20,550 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-08T05:51:20,550 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-08T05:51:20,550 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-08T05:51:20,550 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-08T05:51:20,550 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c8bd091d55a09f9ee467f5e8713aaacf, ASSIGN}] 2024-12-08T05:51:20,552 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c8bd091d55a09f9ee467f5e8713aaacf, ASSIGN 2024-12-08T05:51:20,554 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c8bd091d55a09f9ee467f5e8713aaacf, ASSIGN; state=OFFLINE, location=0d942cb2025d,46289,1733637079736; forceNewPlan=false, retain=false 2024-12-08T05:51:20,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:51:20,704 INFO [0d942cb2025d:35661 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-08T05:51:20,705 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c8bd091d55a09f9ee467f5e8713aaacf, regionState=OPENING, regionLocation=0d942cb2025d,46289,1733637079736 2024-12-08T05:51:20,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c8bd091d55a09f9ee467f5e8713aaacf, ASSIGN because future has completed 2024-12-08T05:51:20,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8bd091d55a09f9ee467f5e8713aaacf, server=0d942cb2025d,46289,1733637079736}] 2024-12-08T05:51:20,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:51:20,869 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 
2024-12-08T05:51:20,869 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c8bd091d55a09f9ee467f5e8713aaacf, NAME => 'TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:51:20,869 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC c8bd091d55a09f9ee467f5e8713aaacf 2024-12-08T05:51:20,869 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:20,870 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c8bd091d55a09f9ee467f5e8713aaacf 2024-12-08T05:51:20,870 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c8bd091d55a09f9ee467f5e8713aaacf 2024-12-08T05:51:20,871 INFO [StoreOpener-c8bd091d55a09f9ee467f5e8713aaacf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c8bd091d55a09f9ee467f5e8713aaacf 2024-12-08T05:51:20,873 INFO [StoreOpener-c8bd091d55a09f9ee467f5e8713aaacf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8bd091d55a09f9ee467f5e8713aaacf columnFamilyName cf 2024-12-08T05:51:20,873 DEBUG [StoreOpener-c8bd091d55a09f9ee467f5e8713aaacf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:20,874 INFO [StoreOpener-c8bd091d55a09f9ee467f5e8713aaacf-1 {}] regionserver.HStore(327): Store=c8bd091d55a09f9ee467f5e8713aaacf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:20,874 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c8bd091d55a09f9ee467f5e8713aaacf 2024-12-08T05:51:20,875 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/default/TestHBaseWalOnEC/c8bd091d55a09f9ee467f5e8713aaacf 2024-12-08T05:51:20,875 DEBUG 
[RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/default/TestHBaseWalOnEC/c8bd091d55a09f9ee467f5e8713aaacf 2024-12-08T05:51:20,876 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c8bd091d55a09f9ee467f5e8713aaacf 2024-12-08T05:51:20,876 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c8bd091d55a09f9ee467f5e8713aaacf 2024-12-08T05:51:20,878 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c8bd091d55a09f9ee467f5e8713aaacf 2024-12-08T05:51:20,880 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/default/TestHBaseWalOnEC/c8bd091d55a09f9ee467f5e8713aaacf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:51:20,881 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c8bd091d55a09f9ee467f5e8713aaacf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72350212, jitterRate=0.07810217142105103}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T05:51:20,881 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c8bd091d55a09f9ee467f5e8713aaacf 2024-12-08T05:51:20,882 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c8bd091d55a09f9ee467f5e8713aaacf: Running coprocessor pre-open hook at 1733637080870Writing region info on filesystem at 1733637080870Initializing all the Stores at 1733637080871 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637080871Cleaning up temporary data from old regions at 1733637080876 (+5 ms)Running coprocessor post-open hooks at 1733637080881 (+5 ms)Region opened successfully at 1733637080882 (+1 ms) 2024-12-08T05:51:20,884 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf., pid=6, masterSystemTime=1733637080863 2024-12-08T05:51:20,887 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 2024-12-08T05:51:20,887 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 
2024-12-08T05:51:20,889 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c8bd091d55a09f9ee467f5e8713aaacf, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,46289,1733637079736 2024-12-08T05:51:20,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8bd091d55a09f9ee467f5e8713aaacf, server=0d942cb2025d,46289,1733637079736 because future has completed 2024-12-08T05:51:20,899 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T05:51:20,899 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c8bd091d55a09f9ee467f5e8713aaacf, server=0d942cb2025d,46289,1733637079736 in 185 msec 2024-12-08T05:51:20,903 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T05:51:20,903 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c8bd091d55a09f9ee467f5e8713aaacf, ASSIGN in 349 msec 2024-12-08T05:51:20,905 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T05:51:20,905 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733637080905"}]},"ts":"1733637080905"} 2024-12-08T05:51:20,908 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-08T05:51:20,910 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T05:51:20,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 409 msec 2024-12-08T05:51:21,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:51:21,136 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-08T05:51:21,136 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-08T05:51:21,137 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T05:51:21,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-08T05:51:21,140 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T05:51:21,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
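The entries above record the full round trip of CreateTableProcedure pid=4: the client request for 'TestHBaseWalOnEC' with a single 'cf' family and REGION_REPLICATION => '1', the region assignment subprocedures (pid=5/6), and the final hbase:meta update to ENABLED. A minimal Java client sketch that would issue an equivalent create request looks roughly like the following; it assumes a stock HBase client configuration on the classpath, and the class name is illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Descriptor mirroring what the master logged: one family 'cf' with
          // default attributes, region replication of 1.
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                  .setRegionReplication(1)
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"));
          // createTable() drives a CreateTableProcedure on the master (pid=4 above)
          // and returns once the table exists and its region is assigned.
          admin.createTable(table.build());
        }
      }
    }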
2024-12-08T05:51:21,143 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf., hostname=0d942cb2025d,46289,1733637079736, seqNum=2] 2024-12-08T05:51:21,147 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35661 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-08T05:51:21,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35661 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-08T05:51:21,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T05:51:21,150 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-08T05:51:21,151 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T05:51:21,152 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T05:51:21,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T05:51:21,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46289 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-08T05:51:21,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 
2024-12-08T05:51:21,307 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing c8bd091d55a09f9ee467f5e8713aaacf 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-08T05:51:21,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/default/TestHBaseWalOnEC/c8bd091d55a09f9ee467f5e8713aaacf/.tmp/cf/84a82c3b2e7b49ad850a5ccfc2f818a2 is 36, key is row/cf:cq/1733637081144/Put/seqid=0 2024-12-08T05:51:21,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741839_1015 (size=4787) 2024-12-08T05:51:21,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741839_1015 (size=4787) 2024-12-08T05:51:21,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741839_1015 (size=4787) 2024-12-08T05:51:21,337 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/default/TestHBaseWalOnEC/c8bd091d55a09f9ee467f5e8713aaacf/.tmp/cf/84a82c3b2e7b49ad850a5ccfc2f818a2 2024-12-08T05:51:21,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/default/TestHBaseWalOnEC/c8bd091d55a09f9ee467f5e8713aaacf/.tmp/cf/84a82c3b2e7b49ad850a5ccfc2f818a2 as hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/default/TestHBaseWalOnEC/c8bd091d55a09f9ee467f5e8713aaacf/cf/84a82c3b2e7b49ad850a5ccfc2f818a2 2024-12-08T05:51:21,356 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/default/TestHBaseWalOnEC/c8bd091d55a09f9ee467f5e8713aaacf/cf/84a82c3b2e7b49ad850a5ccfc2f818a2, entries=1, sequenceid=5, filesize=4.7 K 2024-12-08T05:51:21,357 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for c8bd091d55a09f9ee467f5e8713aaacf in 50ms, sequenceid=5, compaction requested=false 2024-12-08T05:51:21,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for c8bd091d55a09f9ee467f5e8713aaacf: 2024-12-08T05:51:21,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 
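The flush entries above show a single 32-byte cell keyed row/cf:cq being moved from the memstore into an HFile under .../cf/ via FlushTableProcedure pid=7 and its FlushRegionProcedure subprocedure pid=8. A rough client-side sketch of the put-then-flush sequence that would produce such a cell is below; the cell value is a placeholder (the log only reports the data size), and the class name is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          // Single cell at row/cf:cq, matching the key printed by the flusher above;
          // "value" is a placeholder, the actual payload is not shown in the log.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Admin.flush() asks the master to run a flush procedure for the table,
          // which writes the memstore contents out as an HFile on the region server.
          admin.flush(tn);
        }
      }
    }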
2024-12-08T05:51:21,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-08T05:51:21,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35661 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-08T05:51:21,364 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-08T05:51:21,365 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 208 msec 2024-12-08T05:51:21,369 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 219 msec 2024-12-08T05:51:21,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T05:51:21,466 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-08T05:51:21,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T05:51:21,471 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T05:51:21,471 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:21,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:21,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:21,471 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T05:51:21,471 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T05:51:21,471 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1334995944, stopped=false 2024-12-08T05:51:21,471 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0d942cb2025d,35661,1733637079651 2024-12-08T05:51:21,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:21,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:21,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:21,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:21,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:21,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, 
quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:21,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:21,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:21,475 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:51:21,475 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:21,476 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:21,476 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T05:51:21,476 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:21,476 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:21,476 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:21,476 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:21,476 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,41211,1733637079706' ***** 2024-12-08T05:51:21,476 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:51:21,476 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,46289,1733637079736' ***** 2024-12-08T05:51:21,477 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:51:21,477 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,34711,1733637079767' ***** 2024-12-08T05:51:21,477 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:51:21,477 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:51:21,477 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:51:21,477 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:51:21,477 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:51:21,477 INFO [RS:0;0d942cb2025d:41211 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
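The call stacks above all bottom out in TestHBaseWalOnEC.tearDown invoking HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then asks the master to shut the whole mini cluster down. A rough sketch of that kind of JUnit lifecycle is shown below; the class name is illustrative, and whether the real test uses per-method or per-class hooks, or exactly how it starts the cluster, is not visible in this excerpt, so the three-regionserver startup call is an assumption (three region servers do appear in the log).

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Three region servers, matching the three RS threads (41211, 46289, 34711)
        // seen stopping in the log above.
        util.startMiniCluster(3);
      }

      @Test
      public void exerciseCluster() throws Exception {
        // ... create a table, write, flush, assert ...
      }

      @After
      public void tearDown() throws Exception {
        // Closes the shared cluster connection and stops master, region servers and
        // the backing mini DFS/ZooKeeper -- the sequence the call stacks record.
        util.shutdownMiniCluster();
      }
    }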
2024-12-08T05:51:21,477 INFO [RS:0;0d942cb2025d:41211 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T05:51:21,477 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,41211,1733637079706 2024-12-08T05:51:21,477 INFO [RS:0;0d942cb2025d:41211 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:51:21,477 INFO [RS:0;0d942cb2025d:41211 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0d942cb2025d:41211. 2024-12-08T05:51:21,477 DEBUG [RS:0;0d942cb2025d:41211 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:21,477 DEBUG [RS:0;0d942cb2025d:41211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:21,477 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:51:21,477 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.HRegionServer(976): stopping server 0d942cb2025d,41211,1733637079706; all regions closed. 2024-12-08T05:51:21,477 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:51:21,478 INFO [RS:2;0d942cb2025d:34711 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T05:51:21,478 INFO [RS:2;0d942cb2025d:34711 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T05:51:21,478 INFO [RS:1;0d942cb2025d:46289 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T05:51:21,478 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,34711,1733637079767 2024-12-08T05:51:21,478 INFO [RS:1;0d942cb2025d:46289 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-08T05:51:21,478 INFO [RS:2;0d942cb2025d:34711 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:51:21,478 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(3091): Received CLOSE for c8bd091d55a09f9ee467f5e8713aaacf 2024-12-08T05:51:21,478 INFO [RS:2;0d942cb2025d:34711 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;0d942cb2025d:34711. 2024-12-08T05:51:21,478 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,478 DEBUG [RS:2;0d942cb2025d:34711 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:21,478 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,478 DEBUG [RS:2;0d942cb2025d:34711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:21,478 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,46289,1733637079736 2024-12-08T05:51:21,478 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.HRegionServer(976): stopping server 0d942cb2025d,34711,1733637079767; all regions closed. 2024-12-08T05:51:21,478 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,478 INFO [RS:1;0d942cb2025d:46289 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:51:21,478 INFO [RS:1;0d942cb2025d:46289 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;0d942cb2025d:46289. 
2024-12-08T05:51:21,478 DEBUG [RS:1;0d942cb2025d:46289 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:21,478 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c8bd091d55a09f9ee467f5e8713aaacf, disabling compactions & flushes 2024-12-08T05:51:21,479 DEBUG [RS:1;0d942cb2025d:46289 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:21,479 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 2024-12-08T05:51:21,479 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,479 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T05:51:21,479 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:51:21,479 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 2024-12-08T05:51:21,479 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T05:51:21,479 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. after waiting 0 ms 2024-12-08T05:51:21,479 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T05:51:21,479 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 
2024-12-08T05:51:21,479 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,479 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,479 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,479 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T05:51:21,479 DEBUG [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, c8bd091d55a09f9ee467f5e8713aaacf=TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf.} 2024-12-08T05:51:21,479 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:51:21,479 DEBUG [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c8bd091d55a09f9ee467f5e8713aaacf 2024-12-08T05:51:21,479 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,480 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:51:21,480 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:51:21,480 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:51:21,480 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,480 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:51:21,480 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,480 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-08T05:51:21,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741834_1010 (size=93) 2024-12-08T05:51:21,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741835_1011 (size=93) 2024-12-08T05:51:21,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741834_1010 (size=93) 2024-12-08T05:51:21,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741835_1011 (size=93) 2024-12-08T05:51:21,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741834_1010 (size=93) 2024-12-08T05:51:21,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741835_1011 (size=93) 2024-12-08T05:51:21,488 DEBUG [RS:0;0d942cb2025d:41211 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/oldWALs 2024-12-08T05:51:21,488 INFO [RS:0;0d942cb2025d:41211 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C41211%2C1733637079706:(num 1733637080203) 2024-12-08T05:51:21,489 
DEBUG [RS:0;0d942cb2025d:41211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:21,489 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:21,489 INFO [RS:0;0d942cb2025d:41211 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:51:21,489 DEBUG [RS:2;0d942cb2025d:34711 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/oldWALs 2024-12-08T05:51:21,489 INFO [RS:2;0d942cb2025d:34711 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C34711%2C1733637079767:(num 1733637080209) 2024-12-08T05:51:21,489 DEBUG [RS:2;0d942cb2025d:34711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:21,489 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:21,489 INFO [RS:0;0d942cb2025d:41211 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T05:51:21,489 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/default/TestHBaseWalOnEC/c8bd091d55a09f9ee467f5e8713aaacf/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-08T05:51:21,489 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T05:51:21,489 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:51:21,489 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T05:51:21,489 INFO [RS:0;0d942cb2025d:41211 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:51:21,490 INFO [RS:0;0d942cb2025d:41211 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41211 2024-12-08T05:51:21,490 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T05:51:21,490 INFO [RS:2;0d942cb2025d:34711 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:51:21,491 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 
2024-12-08T05:51:21,491 INFO [RS:2;0d942cb2025d:34711 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T05:51:21,491 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c8bd091d55a09f9ee467f5e8713aaacf: Waiting for close lock at 1733637081478Running coprocessor pre-close hooks at 1733637081478Disabling compacts and flushes for region at 1733637081478Disabling writes for close at 1733637081479 (+1 ms)Writing region close event to WAL at 1733637081480 (+1 ms)Running coprocessor post-close hooks at 1733637081490 (+10 ms)Closed at 1733637081491 (+1 ms) 2024-12-08T05:51:21,491 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T05:51:21,491 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T05:51:21,491 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:51:21,491 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T05:51:21,491 INFO [RS:2;0d942cb2025d:34711 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:51:21,491 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf. 
2024-12-08T05:51:21,491 INFO [RS:2;0d942cb2025d:34711 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34711 2024-12-08T05:51:21,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,41211,1733637079706 2024-12-08T05:51:21,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:51:21,492 INFO [RS:0;0d942cb2025d:41211 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:51:21,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,34711,1733637079767 2024-12-08T05:51:21,493 INFO [RS:2;0d942cb2025d:34711 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:51:21,493 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,41211,1733637079706] 2024-12-08T05:51:21,497 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,41211,1733637079706 already deleted, retry=false 2024-12-08T05:51:21,497 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,41211,1733637079706 expired; onlineServers=2 2024-12-08T05:51:21,497 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,34711,1733637079767] 2024-12-08T05:51:21,498 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,34711,1733637079767 already deleted, retry=false 2024-12-08T05:51:21,498 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,34711,1733637079767 expired; onlineServers=1 2024-12-08T05:51:21,505 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/.tmp/info/0fc1a035a33f440e9809f06c42be5961 is 153, key is TestHBaseWalOnEC,,1733637080501.c8bd091d55a09f9ee467f5e8713aaacf./info:regioninfo/1733637080888/Put/seqid=0 2024-12-08T05:51:21,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741840_1016 (size=6637) 2024-12-08T05:51:21,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741840_1016 (size=6637) 2024-12-08T05:51:21,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741840_1016 (size=6637) 2024-12-08T05:51:21,513 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/.tmp/info/0fc1a035a33f440e9809f06c42be5961 2024-12-08T05:51:21,537 DEBUG 
[RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/.tmp/ns/6708838b98664f64b84a1b8ad70c6b33 is 43, key is default/ns:d/1733637080437/Put/seqid=0 2024-12-08T05:51:21,538 INFO [regionserver/0d942cb2025d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:21,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741841_1017 (size=5153) 2024-12-08T05:51:21,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741841_1017 (size=5153) 2024-12-08T05:51:21,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741841_1017 (size=5153) 2024-12-08T05:51:21,545 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/.tmp/ns/6708838b98664f64b84a1b8ad70c6b33 2024-12-08T05:51:21,568 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/.tmp/table/0c5a2f4309d14e2d9a6e1be26c7a567b is 52, key is TestHBaseWalOnEC/table:state/1733637080905/Put/seqid=0 2024-12-08T05:51:21,569 INFO [regionserver/0d942cb2025d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:21,573 INFO [regionserver/0d942cb2025d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:21,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741842_1018 (size=5249) 2024-12-08T05:51:21,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741842_1018 (size=5249) 2024-12-08T05:51:21,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741842_1018 (size=5249) 2024-12-08T05:51:21,576 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/.tmp/table/0c5a2f4309d14e2d9a6e1be26c7a567b 2024-12-08T05:51:21,583 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/.tmp/info/0fc1a035a33f440e9809f06c42be5961 as hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/info/0fc1a035a33f440e9809f06c42be5961 2024-12-08T05:51:21,591 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/info/0fc1a035a33f440e9809f06c42be5961, entries=10, sequenceid=11, filesize=6.5 K 2024-12-08T05:51:21,592 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/.tmp/ns/6708838b98664f64b84a1b8ad70c6b33 as hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/ns/6708838b98664f64b84a1b8ad70c6b33 2024-12-08T05:51:21,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:21,595 INFO [RS:0;0d942cb2025d:41211 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:51:21,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10190a06cdf0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:21,595 INFO [RS:0;0d942cb2025d:41211 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,41211,1733637079706; zookeeper connection closed. 2024-12-08T05:51:21,595 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2ebe52c0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2ebe52c0 2024-12-08T05:51:21,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:21,597 INFO [RS:2;0d942cb2025d:34711 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:51:21,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34711-0x10190a06cdf0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:21,597 INFO [RS:2;0d942cb2025d:34711 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,34711,1733637079767; zookeeper connection closed. 
2024-12-08T05:51:21,597 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@a28fdee {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@a28fdee 2024-12-08T05:51:21,599 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/ns/6708838b98664f64b84a1b8ad70c6b33, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T05:51:21,600 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/.tmp/table/0c5a2f4309d14e2d9a6e1be26c7a567b as hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/table/0c5a2f4309d14e2d9a6e1be26c7a567b 2024-12-08T05:51:21,607 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/table/0c5a2f4309d14e2d9a6e1be26c7a567b, entries=2, sequenceid=11, filesize=5.1 K 2024-12-08T05:51:21,609 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 129ms, sequenceid=11, compaction requested=false 2024-12-08T05:51:21,615 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T05:51:21,616 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:51:21,616 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:51:21,616 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637081479Running coprocessor pre-close hooks at 1733637081479Disabling compacts and flushes for region at 1733637081479Disabling writes for close at 1733637081480 (+1 ms)Obtaining lock to block concurrent updates at 1733637081480Preparing flush snapshotting stores in 1588230740 at 1733637081480Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733637081481 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733637081482 (+1 ms)Flushing 1588230740/info: creating writer at 1733637081482Flushing 1588230740/info: appending metadata at 1733637081504 (+22 ms)Flushing 1588230740/info: closing flushed file at 1733637081504Flushing 1588230740/ns: creating writer at 1733637081521 (+17 ms)Flushing 1588230740/ns: appending metadata at 1733637081537 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733637081537Flushing 1588230740/table: creating writer at 1733637081552 (+15 ms)Flushing 
1588230740/table: appending metadata at 1733637081568 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733637081568Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9086535: reopening flushed file at 1733637081582 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3dc61912: reopening flushed file at 1733637081591 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2596c070: reopening flushed file at 1733637081599 (+8 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 129ms, sequenceid=11, compaction requested=false at 1733637081609 (+10 ms)Writing region close event to WAL at 1733637081610 (+1 ms)Running coprocessor post-close hooks at 1733637081616 (+6 ms)Closed at 1733637081616 2024-12-08T05:51:21,616 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T05:51:21,680 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(976): stopping server 0d942cb2025d,46289,1733637079736; all regions closed. 2024-12-08T05:51:21,680 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,680 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,680 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,681 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,681 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741836_1012 (size=2751) 2024-12-08T05:51:21,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741836_1012 (size=2751) 2024-12-08T05:51:21,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741836_1012 (size=2751) 2024-12-08T05:51:21,687 DEBUG [RS:1;0d942cb2025d:46289 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/oldWALs 2024-12-08T05:51:21,687 INFO [RS:1;0d942cb2025d:46289 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C46289%2C1733637079736.meta:.meta(num 1733637080384) 2024-12-08T05:51:21,687 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,687 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,687 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,688 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,688 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741833_1009 (size=1298) 2024-12-08T05:51:21,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741833_1009 (size=1298) 2024-12-08T05:51:21,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741833_1009 (size=1298) 2024-12-08T05:51:21,693 DEBUG [RS:1;0d942cb2025d:46289 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/oldWALs 
2024-12-08T05:51:21,693 INFO [RS:1;0d942cb2025d:46289 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C46289%2C1733637079736:(num 1733637080191) 2024-12-08T05:51:21,693 DEBUG [RS:1;0d942cb2025d:46289 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:21,693 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:21,693 INFO [RS:1;0d942cb2025d:46289 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:51:21,693 INFO [RS:1;0d942cb2025d:46289 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T05:51:21,693 INFO [RS:1;0d942cb2025d:46289 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:51:21,693 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T05:51:21,693 INFO [RS:1;0d942cb2025d:46289 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46289 2024-12-08T05:51:21,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:51:21,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,46289,1733637079736 2024-12-08T05:51:21,695 INFO [RS:1;0d942cb2025d:46289 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:51:21,697 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,46289,1733637079736] 2024-12-08T05:51:21,700 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,46289,1733637079736 already deleted, retry=false 2024-12-08T05:51:21,700 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,46289,1733637079736 expired; onlineServers=0 2024-12-08T05:51:21,700 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0d942cb2025d,35661,1733637079651' ***** 2024-12-08T05:51:21,700 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T05:51:21,700 INFO [M:0;0d942cb2025d:35661 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:51:21,700 INFO [M:0;0d942cb2025d:35661 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:51:21,700 DEBUG [M:0;0d942cb2025d:35661 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T05:51:21,700 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T05:51:21,700 DEBUG [M:0;0d942cb2025d:35661 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T05:51:21,700 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637079988 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637079988,5,FailOnTimeoutGroup] 2024-12-08T05:51:21,700 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637079984 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637079984,5,FailOnTimeoutGroup] 2024-12-08T05:51:21,700 INFO [M:0;0d942cb2025d:35661 {}] hbase.ChoreService(370): Chore service for: master/0d942cb2025d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T05:51:21,700 INFO [M:0;0d942cb2025d:35661 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:51:21,701 DEBUG [M:0;0d942cb2025d:35661 {}] master.HMaster(1795): Stopping service threads 2024-12-08T05:51:21,701 INFO [M:0;0d942cb2025d:35661 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T05:51:21,701 INFO [M:0;0d942cb2025d:35661 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:51:21,701 INFO [M:0;0d942cb2025d:35661 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T05:51:21,701 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T05:51:21,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T05:51:21,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:21,702 DEBUG [M:0;0d942cb2025d:35661 {}] zookeeper.ZKUtil(347): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T05:51:21,702 WARN [M:0;0d942cb2025d:35661 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T05:51:21,702 INFO [M:0;0d942cb2025d:35661 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/.lastflushedseqids 2024-12-08T05:51:21,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741843_1019 (size=127) 2024-12-08T05:51:21,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741843_1019 (size=127) 2024-12-08T05:51:21,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741843_1019 (size=127) 2024-12-08T05:51:21,711 INFO [M:0;0d942cb2025d:35661 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T05:51:21,711 INFO [M:0;0d942cb2025d:35661 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T05:51:21,711 DEBUG [M:0;0d942cb2025d:35661 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:51:21,711 INFO [M:0;0d942cb2025d:35661 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:21,711 DEBUG [M:0;0d942cb2025d:35661 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:21,711 DEBUG [M:0;0d942cb2025d:35661 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:51:21,711 DEBUG [M:0;0d942cb2025d:35661 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:21,711 INFO [M:0;0d942cb2025d:35661 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-08T05:51:21,730 DEBUG [M:0;0d942cb2025d:35661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f759f91f0b824a2790d5cc90dce13541 is 82, key is hbase:meta,,1/info:regioninfo/1733637080421/Put/seqid=0 2024-12-08T05:51:21,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741844_1020 (size=5672) 2024-12-08T05:51:21,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741844_1020 (size=5672) 2024-12-08T05:51:21,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741844_1020 (size=5672) 2024-12-08T05:51:21,738 INFO [M:0;0d942cb2025d:35661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f759f91f0b824a2790d5cc90dce13541 2024-12-08T05:51:21,761 DEBUG [M:0;0d942cb2025d:35661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b64753de95c949ffb06cf6ea145941fa is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733637080912/Put/seqid=0 2024-12-08T05:51:21,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741845_1021 (size=6439) 2024-12-08T05:51:21,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741845_1021 (size=6439) 2024-12-08T05:51:21,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741845_1021 (size=6439) 2024-12-08T05:51:21,769 INFO [M:0;0d942cb2025d:35661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), 
to=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b64753de95c949ffb06cf6ea145941fa 2024-12-08T05:51:21,790 DEBUG [M:0;0d942cb2025d:35661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/96c51c12677544958bc9b4007a7d8d62 is 69, key is 0d942cb2025d,34711,1733637079767/rs:state/1733637080018/Put/seqid=0 2024-12-08T05:51:21,797 INFO [RS:1;0d942cb2025d:46289 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:51:21,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:21,797 INFO [RS:1;0d942cb2025d:46289 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,46289,1733637079736; zookeeper connection closed. 2024-12-08T05:51:21,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46289-0x10190a06cdf0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:21,797 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@51d39678 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@51d39678 2024-12-08T05:51:21,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741846_1022 (size=5294) 2024-12-08T05:51:21,798 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-08T05:51:21,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741846_1022 (size=5294) 2024-12-08T05:51:21,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741846_1022 (size=5294) 2024-12-08T05:51:21,799 INFO [M:0;0d942cb2025d:35661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/96c51c12677544958bc9b4007a7d8d62 2024-12-08T05:51:21,806 DEBUG [M:0;0d942cb2025d:35661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f759f91f0b824a2790d5cc90dce13541 as hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f759f91f0b824a2790d5cc90dce13541 2024-12-08T05:51:21,813 INFO [M:0;0d942cb2025d:35661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f759f91f0b824a2790d5cc90dce13541, entries=8, sequenceid=72, filesize=5.5 K 2024-12-08T05:51:21,815 DEBUG [M:0;0d942cb2025d:35661 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b64753de95c949ffb06cf6ea145941fa as hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b64753de95c949ffb06cf6ea145941fa 2024-12-08T05:51:21,821 INFO [M:0;0d942cb2025d:35661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b64753de95c949ffb06cf6ea145941fa, entries=8, sequenceid=72, filesize=6.3 K 2024-12-08T05:51:21,822 DEBUG [M:0;0d942cb2025d:35661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/96c51c12677544958bc9b4007a7d8d62 as hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/96c51c12677544958bc9b4007a7d8d62 2024-12-08T05:51:21,828 INFO [M:0;0d942cb2025d:35661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40721/user/jenkins/test-data/be876146-cd8a-b6cb-1488-9f0920b7d43b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/96c51c12677544958bc9b4007a7d8d62, entries=3, sequenceid=72, filesize=5.2 K 2024-12-08T05:51:21,830 INFO [M:0;0d942cb2025d:35661 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=72, compaction requested=false 2024-12-08T05:51:21,831 INFO [M:0;0d942cb2025d:35661 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:21,831 DEBUG [M:0;0d942cb2025d:35661 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637081711Disabling compacts and flushes for region at 1733637081711Disabling writes for close at 1733637081711Obtaining lock to block concurrent updates at 1733637081711Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733637081711Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733637081712 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733637081713 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733637081713Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733637081729 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733637081729Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733637081744 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733637081761 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733637081761Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733637081775 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733637081790 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733637081790Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c6c9c85: reopening flushed file at 1733637081805 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f56585f: reopening flushed file at 1733637081813 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cc8f03: reopening flushed file at 1733637081821 (+8 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=72, compaction requested=false at 1733637081830 (+9 ms)Writing region close event to WAL at 1733637081831 (+1 ms)Closed at 1733637081831 2024-12-08T05:51:21,832 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,832 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,832 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,832 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,832 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:21,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46317 is added to blk_1073741830_1006 (size=32674) 2024-12-08T05:51:21,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36953 is added to blk_1073741830_1006 (size=32674) 2024-12-08T05:51:21,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33209 is added to blk_1073741830_1006 (size=32674) 2024-12-08T05:51:21,836 INFO [M:0;0d942cb2025d:35661 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T05:51:21,836 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T05:51:21,836 INFO [M:0;0d942cb2025d:35661 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35661 2024-12-08T05:51:21,837 INFO [M:0;0d942cb2025d:35661 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:51:21,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:21,938 INFO [M:0;0d942cb2025d:35661 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:51:21,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35661-0x10190a06cdf0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:21,941 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7aa48b42{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:21,941 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4924c16d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:21,941 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:21,942 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7adc0795{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:21,942 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@328032c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:21,943 WARN [BP-909281240-172.17.0.2-1733637078752 heartbeating to localhost/127.0.0.1:40721 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:21,943 WARN [BP-909281240-172.17.0.2-1733637078752 heartbeating to localhost/127.0.0.1:40721 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-909281240-172.17.0.2-1733637078752 (Datanode Uuid 5c41820f-237d-41b5-81ad-29a090585a50) service to localhost/127.0.0.1:40721 2024-12-08T05:51:21,943 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:51:21,943 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:21,944 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/data/data5/current/BP-909281240-172.17.0.2-1733637078752 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:21,944 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/data/data6/current/BP-909281240-172.17.0.2-1733637078752 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:21,944 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:21,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@31ac7b98{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:21,946 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@284cd9af{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:21,946 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:21,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69cb0b1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:21,947 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@564d8641{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:21,947 WARN [BP-909281240-172.17.0.2-1733637078752 heartbeating to localhost/127.0.0.1:40721 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:21,947 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:51:21,948 WARN [BP-909281240-172.17.0.2-1733637078752 heartbeating to localhost/127.0.0.1:40721 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-909281240-172.17.0.2-1733637078752 (Datanode Uuid cc57298d-2bd5-42c8-8d5c-e752448197d9) service to localhost/127.0.0.1:40721 2024-12-08T05:51:21,948 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:21,948 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/data/data3/current/BP-909281240-172.17.0.2-1733637078752 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:21,948 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/data/data4/current/BP-909281240-172.17.0.2-1733637078752 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:21,949 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:21,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5fbe2fc0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:21,951 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@652901a4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:21,951 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:21,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a6744cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:21,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d1c78c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:21,952 WARN [BP-909281240-172.17.0.2-1733637078752 heartbeating to localhost/127.0.0.1:40721 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:21,952 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:51:21,952 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:21,953 WARN [BP-909281240-172.17.0.2-1733637078752 heartbeating to localhost/127.0.0.1:40721 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-909281240-172.17.0.2-1733637078752 (Datanode Uuid b30de565-a784-4e5b-aa43-30e87f66dcf3) service to localhost/127.0.0.1:40721 2024-12-08T05:51:21,953 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/data/data1/current/BP-909281240-172.17.0.2-1733637078752 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:21,953 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/cluster_151486c2-c699-da18-59fb-290a32d48eee/data/data2/current/BP-909281240-172.17.0.2-1733637078752 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:21,954 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:21,960 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4999c610{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:51:21,960 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@18b4ebc7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:21,960 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:21,961 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49b2b984{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:21,961 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18ee857{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fa84232-e713-5021-9152-c233061becb5/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:21,968 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T05:51:21,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T05:51:22,000 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=144 (was 86) - Thread LEAK? -, OpenFileDescriptor=521 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=156 (was 156), ProcessCount=11 (was 11), AvailableMemoryMB=7665 (was 7814)