2024-12-05 19:53:39,708 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-05 19:53:39,720 main DEBUG Took 0.010427 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-05 19:53:39,720 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-05 19:53:39,721 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-05 19:53:39,722 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-05 19:53:39,723 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 19:53:39,733 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-05 19:53:39,755 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,756 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 19:53:39,757 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,757 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 19:53:39,758 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,758 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 19:53:39,759 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,759 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 19:53:39,759 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,760 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 19:53:39,760 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,761 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 19:53:39,761 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,761 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-05 19:53:39,762 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,762 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 19:53:39,763 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,763 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 19:53:39,764 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,764 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 19:53:39,764 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,765 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 19:53:39,765 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,765 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 19:53:39,766 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,766 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-05 19:53:39,767 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 19:53:39,769 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-05 19:53:39,770 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-05 19:53:39,771 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-05 19:53:39,772 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-05 19:53:39,772 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-05 19:53:39,782 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-05 19:53:39,784 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-05 19:53:39,786 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-05 19:53:39,786 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-05 19:53:39,787 main DEBUG createAppenders(={Console}) 2024-12-05 19:53:39,788 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-05 19:53:39,788 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-05 19:53:39,788 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-05 19:53:39,789 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-05 19:53:39,789 main DEBUG OutputStream closed 2024-12-05 19:53:39,789 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-05 19:53:39,789 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-05 19:53:39,790 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-05 19:53:39,860 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-05 19:53:39,862 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-05 19:53:39,863 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-05 19:53:39,864 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-05 19:53:39,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-05 19:53:39,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-05 19:53:39,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-05 19:53:39,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-05 19:53:39,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-05 19:53:39,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-05 19:53:39,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-05 19:53:39,867 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-05 19:53:39,867 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-05 19:53:39,867 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-05 19:53:39,868 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-05 19:53:39,868 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-05 19:53:39,868 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-05 19:53:39,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-05 19:53:39,871 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-05 19:53:39,872 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-05 19:53:39,872 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-05 19:53:39,873 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-05T19:53:39,889 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-05 19:53:39,892 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-05 19:53:39,893 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-05T19:53:40,200 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39 2024-12-05T19:53:40,235 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697, deleteOnExit=true 2024-12-05T19:53:40,236 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/test.cache.data in system properties and HBase conf 2024-12-05T19:53:40,236 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T19:53:40,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/hadoop.log.dir in system properties and HBase conf 2024-12-05T19:53:40,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T19:53:40,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T19:53:40,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T19:53:40,344 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-05T19:53:40,484 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-05T19:53:40,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T19:53:40,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T19:53:40,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T19:53:40,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T19:53:40,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T19:53:40,493 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T19:53:40,494 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T19:53:40,494 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T19:53:40,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T19:53:40,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/nfs.dump.dir in system properties and HBase conf 2024-12-05T19:53:40,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/java.io.tmpdir in system properties and HBase conf 2024-12-05T19:53:40,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T19:53:40,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T19:53:40,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T19:53:41,420 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-05T19:53:41,520 INFO [Time-limited test {}] log.Log(170): Logging initialized @2497ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-05T19:53:41,606 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T19:53:41,677 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T19:53:41,698 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T19:53:41,699 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T19:53:41,700 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T19:53:41,715 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T19:53:41,718 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/hadoop.log.dir/,AVAILABLE} 2024-12-05T19:53:41,719 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T19:53:41,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/java.io.tmpdir/jetty-localhost-45783-hadoop-hdfs-3_4_1-tests_jar-_-any-3262849705985358295/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T19:53:41,924 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:45783} 2024-12-05T19:53:41,925 INFO [Time-limited test {}] server.Server(415): Started @2902ms 2024-12-05T19:53:42,349 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T19:53:42,359 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T19:53:42,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T19:53:42,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T19:53:42,361 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T19:53:42,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/hadoop.log.dir/,AVAILABLE} 2024-12-05T19:53:42,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T19:53:42,489 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/java.io.tmpdir/jetty-localhost-42771-hadoop-hdfs-3_4_1-tests_jar-_-any-17575430163941588971/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T19:53:42,490 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:42771} 2024-12-05T19:53:42,490 INFO [Time-limited test {}] server.Server(415): Started @3468ms 2024-12-05T19:53:42,547 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T19:53:42,669 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T19:53:42,676 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T19:53:42,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T19:53:42,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T19:53:42,685 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T19:53:42,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/hadoop.log.dir/,AVAILABLE} 2024-12-05T19:53:42,687 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T19:53:42,852 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/java.io.tmpdir/jetty-localhost-34431-hadoop-hdfs-3_4_1-tests_jar-_-any-915690663654670356/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T19:53:42,853 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:34431} 2024-12-05T19:53:42,853 INFO [Time-limited test {}] server.Server(415): Started @3831ms 2024-12-05T19:53:42,855 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T19:53:42,899 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T19:53:42,903 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T19:53:42,906 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T19:53:42,906 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T19:53:42,906 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T19:53:42,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/hadoop.log.dir/,AVAILABLE} 2024-12-05T19:53:42,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T19:53:43,014 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/data/data2/current/BP-1009711422-172.17.0.2-1733428421158/current, will proceed with Du for space computation calculation, 2024-12-05T19:53:43,014 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/data/data4/current/BP-1009711422-172.17.0.2-1733428421158/current, will proceed with Du for space computation calculation, 2024-12-05T19:53:43,014 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/data/data3/current/BP-1009711422-172.17.0.2-1733428421158/current, will proceed with Du for space computation calculation, 2024-12-05T19:53:43,014 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/data/data1/current/BP-1009711422-172.17.0.2-1733428421158/current, will proceed with Du for space computation calculation, 2024-12-05T19:53:43,059 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/java.io.tmpdir/jetty-localhost-33939-hadoop-hdfs-3_4_1-tests_jar-_-any-12096487338790648543/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T19:53:43,060 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:33939} 
2024-12-05T19:53:43,060 INFO [Time-limited test {}] server.Server(415): Started @4038ms 2024-12-05T19:53:43,063 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T19:53:43,065 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T19:53:43,065 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T19:53:43,142 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaeed91565a6f4070 with lease ID 0xf5b139779796420a: Processing first storage report for DS-5149af7e-c5ad-44aa-827f-2681ec8ad3f1 from datanode DatanodeRegistration(127.0.0.1:37605, datanodeUuid=5cbbda21-0fc6-4b59-9453-b5d8c0d6c385, infoPort=42277, infoSecurePort=0, ipcPort=43833, storageInfo=lv=-57;cid=testClusterID;nsid=411540303;c=1733428421158) 2024-12-05T19:53:43,143 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaeed91565a6f4070 with lease ID 0xf5b139779796420a: from storage DS-5149af7e-c5ad-44aa-827f-2681ec8ad3f1 node DatanodeRegistration(127.0.0.1:37605, datanodeUuid=5cbbda21-0fc6-4b59-9453-b5d8c0d6c385, infoPort=42277, infoSecurePort=0, ipcPort=43833, storageInfo=lv=-57;cid=testClusterID;nsid=411540303;c=1733428421158), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-05T19:53:43,144 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94fa59a1ef9a59c0 with lease ID 0xf5b139779796420b: Processing first storage report for DS-2cff1dd6-4cc4-4fde-8aea-38e4792115d1 from datanode DatanodeRegistration(127.0.0.1:44813, datanodeUuid=0510c327-bcf0-4f71-ae18-e8c5ddacdcf9, infoPort=44963, infoSecurePort=0, ipcPort=37121, storageInfo=lv=-57;cid=testClusterID;nsid=411540303;c=1733428421158) 2024-12-05T19:53:43,144 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94fa59a1ef9a59c0 with lease ID 0xf5b139779796420b: from storage DS-2cff1dd6-4cc4-4fde-8aea-38e4792115d1 node DatanodeRegistration(127.0.0.1:44813, datanodeUuid=0510c327-bcf0-4f71-ae18-e8c5ddacdcf9, infoPort=44963, infoSecurePort=0, ipcPort=37121, storageInfo=lv=-57;cid=testClusterID;nsid=411540303;c=1733428421158), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T19:53:43,145 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaeed91565a6f4070 with lease ID 0xf5b139779796420a: Processing first storage report for DS-6f527bf9-e576-4715-b7a7-d23438e43eda from datanode DatanodeRegistration(127.0.0.1:37605, datanodeUuid=5cbbda21-0fc6-4b59-9453-b5d8c0d6c385, infoPort=42277, infoSecurePort=0, ipcPort=43833, storageInfo=lv=-57;cid=testClusterID;nsid=411540303;c=1733428421158) 2024-12-05T19:53:43,145 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaeed91565a6f4070 with lease ID 0xf5b139779796420a: from storage DS-6f527bf9-e576-4715-b7a7-d23438e43eda node DatanodeRegistration(127.0.0.1:37605, datanodeUuid=5cbbda21-0fc6-4b59-9453-b5d8c0d6c385, infoPort=42277, infoSecurePort=0, ipcPort=43833, storageInfo=lv=-57;cid=testClusterID;nsid=411540303;c=1733428421158), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, 
invalidatedBlocks: 0 2024-12-05T19:53:43,145 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94fa59a1ef9a59c0 with lease ID 0xf5b139779796420b: Processing first storage report for DS-76388200-379c-492e-98c4-2805e56ee1ce from datanode DatanodeRegistration(127.0.0.1:44813, datanodeUuid=0510c327-bcf0-4f71-ae18-e8c5ddacdcf9, infoPort=44963, infoSecurePort=0, ipcPort=37121, storageInfo=lv=-57;cid=testClusterID;nsid=411540303;c=1733428421158) 2024-12-05T19:53:43,145 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94fa59a1ef9a59c0 with lease ID 0xf5b139779796420b: from storage DS-76388200-379c-492e-98c4-2805e56ee1ce node DatanodeRegistration(127.0.0.1:44813, datanodeUuid=0510c327-bcf0-4f71-ae18-e8c5ddacdcf9, infoPort=44963, infoSecurePort=0, ipcPort=37121, storageInfo=lv=-57;cid=testClusterID;nsid=411540303;c=1733428421158), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T19:53:43,200 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/data/data5/current/BP-1009711422-172.17.0.2-1733428421158/current, will proceed with Du for space computation calculation, 2024-12-05T19:53:43,200 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/data/data6/current/BP-1009711422-172.17.0.2-1733428421158/current, will proceed with Du for space computation calculation, 2024-12-05T19:53:43,224 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T19:53:43,230 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd2441fb52dcbc2fa with lease ID 0xf5b139779796420c: Processing first storage report for DS-f41606fc-e486-4adc-b1c2-ef9ee721c920 from datanode DatanodeRegistration(127.0.0.1:40207, datanodeUuid=c958940b-4ae7-462d-8566-abb6a91d2b6c, infoPort=39295, infoSecurePort=0, ipcPort=43769, storageInfo=lv=-57;cid=testClusterID;nsid=411540303;c=1733428421158) 2024-12-05T19:53:43,230 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd2441fb52dcbc2fa with lease ID 0xf5b139779796420c: from storage DS-f41606fc-e486-4adc-b1c2-ef9ee721c920 node DatanodeRegistration(127.0.0.1:40207, datanodeUuid=c958940b-4ae7-462d-8566-abb6a91d2b6c, infoPort=39295, infoSecurePort=0, ipcPort=43769, storageInfo=lv=-57;cid=testClusterID;nsid=411540303;c=1733428421158), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T19:53:43,230 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd2441fb52dcbc2fa with lease ID 0xf5b139779796420c: Processing first storage report for DS-28471324-983a-4836-abca-1b99739a0410 from datanode DatanodeRegistration(127.0.0.1:40207, datanodeUuid=c958940b-4ae7-462d-8566-abb6a91d2b6c, infoPort=39295, infoSecurePort=0, ipcPort=43769, storageInfo=lv=-57;cid=testClusterID;nsid=411540303;c=1733428421158) 2024-12-05T19:53:43,230 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd2441fb52dcbc2fa with lease ID 0xf5b139779796420c: from storage DS-28471324-983a-4836-abca-1b99739a0410 node DatanodeRegistration(127.0.0.1:40207, datanodeUuid=c958940b-4ae7-462d-8566-abb6a91d2b6c, infoPort=39295, infoSecurePort=0, ipcPort=43769, storageInfo=lv=-57;cid=testClusterID;nsid=411540303;c=1733428421158), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T19:53:43,493 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39 2024-12-05T19:53:43,571 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable
2024-12-05T19:53:43,628 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=158, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=284, ProcessCount=11, AvailableMemoryMB=8903
2024-12-05T19:53:43,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-05T19:53:43,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-05T19:53:43,726 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/zookeeper_0, clientPort=60123, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-05T19:53:43,738 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60123
2024-12-05T19:53:43,768 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T19:53:43,772 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T19:53:43,874 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-05T19:53:43,874 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-05T19:53:43,928 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_919299418_22 at /127.0.0.1:34188 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:40207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34188 dst: /127.0.0.1:40207
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T19:53:43,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-05T19:53:44,346 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-05T19:53:44,359 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557 with version=8
2024-12-05T19:53:44,360 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/hbase-staging
2024-12-05T19:53:44,464 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-05T19:53:44,726 INFO [Time-limited test {}] client.ConnectionUtils(128): master/86162e2766a8:0 server-side Connection retries=45
2024-12-05T19:53:44,738 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T19:53:44,738 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-05T19:53:44,743 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-05T19:53:44,743 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T19:53:44,743 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-05T19:53:44,886 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-05T19:53:44,950 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-05T19:53:44,958 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-05T19:53:44,963 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T19:53:44,992 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 12364 (auto-detected) 2024-12-05T19:53:44,993 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-05T19:53:45,013 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35797 2024-12-05T19:53:45,040 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35797 connecting to ZooKeeper ensemble=127.0.0.1:60123 2024-12-05T19:53:45,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:357970x0, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T19:53:45,074 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35797-0x10063be64fe0000 connected 2024-12-05T19:53:45,101 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:45,104 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:45,114 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:45,119 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557, hbase.cluster.distributed=false 2024-12-05T19:53:45,143 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T19:53:45,147 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35797 2024-12-05T19:53:45,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35797 2024-12-05T19:53:45,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35797 2024-12-05T19:53:45,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35797 2024-12-05T19:53:45,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35797 2024-12-05T19:53:45,272 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/86162e2766a8:0 server-side Connection retries=45 2024-12-05T19:53:45,274 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:45,275 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:45,275 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T19:53:45,275 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:45,275 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T19:53:45,278 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T19:53:45,281 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T19:53:45,282 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44613 2024-12-05T19:53:45,284 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44613 connecting to ZooKeeper ensemble=127.0.0.1:60123 2024-12-05T19:53:45,286 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:45,290 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:45,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446130x0, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T19:53:45,298 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44613-0x10063be64fe0001 connected 2024-12-05T19:53:45,298 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:45,303 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T19:53:45,313 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T19:53:45,316 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T19:53:45,323 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T19:53:45,324 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44613 2024-12-05T19:53:45,324 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=44613 2024-12-05T19:53:45,325 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44613 2024-12-05T19:53:45,325 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44613 2024-12-05T19:53:45,326 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44613 2024-12-05T19:53:45,344 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/86162e2766a8:0 server-side Connection retries=45 2024-12-05T19:53:45,344 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:45,344 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:45,345 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T19:53:45,345 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:45,345 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T19:53:45,346 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T19:53:45,346 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T19:53:45,347 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46093 2024-12-05T19:53:45,348 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46093 connecting to ZooKeeper ensemble=127.0.0.1:60123 2024-12-05T19:53:45,349 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:45,354 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:45,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:460930x0, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T19:53:45,364 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46093-0x10063be64fe0002 connected 2024-12-05T19:53:45,364 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:45,365 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-12-05T19:53:45,370 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T19:53:45,371 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T19:53:45,373 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T19:53:45,383 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46093 2024-12-05T19:53:45,384 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46093 2024-12-05T19:53:45,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46093 2024-12-05T19:53:45,388 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46093 2024-12-05T19:53:45,392 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46093 2024-12-05T19:53:45,409 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/86162e2766a8:0 server-side Connection retries=45 2024-12-05T19:53:45,410 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:45,410 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:45,410 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T19:53:45,410 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:45,410 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T19:53:45,410 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T19:53:45,411 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T19:53:45,412 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35535 2024-12-05T19:53:45,413 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35535 connecting to ZooKeeper ensemble=127.0.0.1:60123 2024-12-05T19:53:45,415 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:45,417 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:45,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:355350x0, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T19:53:45,424 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35535-0x10063be64fe0003 connected 2024-12-05T19:53:45,424 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:45,424 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T19:53:45,425 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T19:53:45,426 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T19:53:45,428 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T19:53:45,429 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35535 2024-12-05T19:53:45,429 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35535 2024-12-05T19:53:45,430 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35535 2024-12-05T19:53:45,430 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35535 2024-12-05T19:53:45,431 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35535 2024-12-05T19:53:45,446 DEBUG [M:0;86162e2766a8:35797 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;86162e2766a8:35797 2024-12-05T19:53:45,447 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/86162e2766a8,35797,1733428424518 2024-12-05T19:53:45,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:45,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:45,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-05T19:53:45,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:45,457 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/86162e2766a8,35797,1733428424518 2024-12-05T19:53:45,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T19:53:45,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T19:53:45,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T19:53:45,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:45,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:45,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:45,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:45,490 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T19:53:45,492 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/86162e2766a8,35797,1733428424518 from backup master directory 2024-12-05T19:53:45,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/86162e2766a8,35797,1733428424518 2024-12-05T19:53:45,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:45,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-05T19:53:45,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:45,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:45,496 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T19:53:45,497 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=86162e2766a8,35797,1733428424518 2024-12-05T19:53:45,499 INFO [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-05T19:53:45,500 INFO [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-05T19:53:45,571 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/hbase.id] with ID: 978dde15-16c2-4d9f-86a7-2daab0b203f1 2024-12-05T19:53:45,571 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/.tmp/hbase.id 2024-12-05T19:53:45,579 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:45,579 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:45,583 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_919299418_22 at /127.0.0.1:34218 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:40207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34218 dst: /127.0.0.1:40207 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:45,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-05T19:53:45,590 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T19:53:45,591 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/.tmp/hbase.id]:[hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/hbase.id] 2024-12-05T19:53:45,638 INFO [master/86162e2766a8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:45,643 INFO [master/86162e2766a8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T19:53:45,663 INFO [master/86162e2766a8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-12-05T19:53:45,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:45,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:45,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:45,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:45,680 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:45,681 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:45,684 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_919299418_22 at /127.0.0.1:51362 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:44813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51362 dst: /127.0.0.1:44813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:45,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-05T19:53:45,691 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T19:53:45,709 INFO [master/86162e2766a8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T19:53:45,711 INFO [master/86162e2766a8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T19:53:45,718 INFO [master/86162e2766a8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T19:53:45,756 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:45,756 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:45,760 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_919299418_22 at /127.0.0.1:34236 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:40207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34236 dst: /127.0.0.1:40207 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:45,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-05T19:53:45,767 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T19:53:45,791 INFO [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store 2024-12-05T19:53:45,810 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:45,810 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:45,815 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_919299418_22 at /127.0.0.1:51366 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51366 dst: /127.0.0.1:44813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:45,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-05T19:53:45,821 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T19:53:45,826 INFO [master/86162e2766a8:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-05T19:53:45,829 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T19:53:45,830 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T19:53:45,830 INFO [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T19:53:45,830 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T19:53:45,832 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-05T19:53:45,832 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T19:53:45,832 INFO [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T19:53:45,833 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733428425830Disabling compacts and flushes for region at 1733428425830Disabling writes for close at 1733428425832 (+2 ms)Writing region close event to WAL at 1733428425832Closed at 1733428425832 2024-12-05T19:53:45,835 WARN [master/86162e2766a8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/.initializing 2024-12-05T19:53:45,835 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/WALs/86162e2766a8,35797,1733428424518 2024-12-05T19:53:45,844 INFO [master/86162e2766a8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T19:53:45,859 INFO [master/86162e2766a8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=86162e2766a8%2C35797%2C1733428424518, suffix=, logDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/WALs/86162e2766a8,35797,1733428424518, archiveDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/oldWALs, maxLogs=10 2024-12-05T19:53:45,904 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/WALs/86162e2766a8,35797,1733428424518/86162e2766a8%2C35797%2C1733428424518.1733428425865, exclude list is [], retry=0 2024-12-05T19:53:45,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:45,929 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44813,DS-2cff1dd6-4cc4-4fde-8aea-38e4792115d1,DISK] 2024-12-05T19:53:45,929 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37605,DS-5149af7e-c5ad-44aa-827f-2681ec8ad3f1,DISK] 2024-12-05T19:53:45,929 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40207,DS-f41606fc-e486-4adc-b1c2-ef9ee721c920,DISK] 2024-12-05T19:53:45,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-05T19:53:45,977 INFO [master/86162e2766a8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/WALs/86162e2766a8,35797,1733428424518/86162e2766a8%2C35797%2C1733428424518.1733428425865 2024-12-05T19:53:45,978 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44963:44963),(127.0.0.1/127.0.0.1:42277:42277),(127.0.0.1/127.0.0.1:39295:39295)] 2024-12-05T19:53:45,978 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T19:53:45,979 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T19:53:45,982 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:45,983 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:46,025 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:46,054 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T19:53:46,057 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:46,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:46,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:46,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T19:53:46,064 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:46,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T19:53:46,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:46,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T19:53:46,068 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:46,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T19:53:46,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:46,072 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T19:53:46,073 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:46,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T19:53:46,074 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:46,078 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:46,079 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:46,084 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:46,085 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:46,089 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T19:53:46,092 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:46,107 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T19:53:46,108 INFO [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72268258, jitterRate=0.07688096165657043}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T19:53:46,115 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733428425996Initializing all the Stores at 1733428425998 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428425999 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733428425999Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733428426000 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733428426000Cleaning up temporary data from old regions at 1733428426085 (+85 ms)Region opened successfully at 1733428426114 (+29 ms) 2024-12-05T19:53:46,117 INFO [master/86162e2766a8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T19:53:46,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-05T19:53:46,122 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-05T19:53:46,156 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ec1329b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=86162e2766a8/172.17.0.2:0 2024-12-05T19:53:46,189 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T19:53:46,202 INFO [master/86162e2766a8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T19:53:46,202 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T19:53:46,205 INFO [master/86162e2766a8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T19:53:46,207 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-05T19:53:46,212 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-05T19:53:46,212 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T19:53:46,240 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-05T19:53:46,250 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T19:53:46,252 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T19:53:46,255 INFO [master/86162e2766a8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T19:53:46,256 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T19:53:46,258 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T19:53:46,260 INFO [master/86162e2766a8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T19:53:46,263 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T19:53:46,265 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T19:53:46,266 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T19:53:46,268 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T19:53:46,286 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T19:53:46,287 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T19:53:46,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:46,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:46,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:46,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-05T19:53:46,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:46,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:46,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:46,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:46,295 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=86162e2766a8,35797,1733428424518, sessionid=0x10063be64fe0000, setting cluster-up flag (Was=false) 2024-12-05T19:53:46,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:46,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:46,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:46,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:46,313 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T19:53:46,315 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=86162e2766a8,35797,1733428424518 2024-12-05T19:53:46,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:46,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:46,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:46,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:46,327 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T19:53:46,328 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=86162e2766a8,35797,1733428424518 2024-12-05T19:53:46,335 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T19:53:46,416 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T19:53:46,427 INFO [master/86162e2766a8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T19:53:46,434 INFO [master/86162e2766a8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-05T19:53:46,435 INFO [RS:2;86162e2766a8:35535 {}] regionserver.HRegionServer(746): ClusterId : 978dde15-16c2-4d9f-86a7-2daab0b203f1 2024-12-05T19:53:46,435 INFO [RS:0;86162e2766a8:44613 {}] regionserver.HRegionServer(746): ClusterId : 978dde15-16c2-4d9f-86a7-2daab0b203f1 2024-12-05T19:53:46,435 INFO [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(746): ClusterId : 978dde15-16c2-4d9f-86a7-2daab0b203f1 2024-12-05T19:53:46,439 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T19:53:46,439 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T19:53:46,439 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T19:53:46,441 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 86162e2766a8,35797,1733428424518 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T19:53:46,445 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T19:53:46,445 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T19:53:46,445 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T19:53:46,446 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T19:53:46,446 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T19:53:46,446 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T19:53:46,450 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T19:53:46,450 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T19:53:46,450 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T19:53:46,451 DEBUG [RS:0;86162e2766a8:44613 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21eacd71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=86162e2766a8/172.17.0.2:0 2024-12-05T19:53:46,451 DEBUG [RS:1;86162e2766a8:46093 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68ac7dc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=86162e2766a8/172.17.0.2:0 2024-12-05T19:53:46,451 DEBUG [RS:2;86162e2766a8:35535 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@849d51a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=86162e2766a8/172.17.0.2:0 2024-12-05T19:53:46,452 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/86162e2766a8:0, corePoolSize=5, maxPoolSize=5 2024-12-05T19:53:46,452 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/86162e2766a8:0, corePoolSize=5, maxPoolSize=5 2024-12-05T19:53:46,452 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/86162e2766a8:0, corePoolSize=5, maxPoolSize=5 2024-12-05T19:53:46,453 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/86162e2766a8:0, corePoolSize=5, maxPoolSize=5 2024-12-05T19:53:46,453 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/86162e2766a8:0, corePoolSize=10, maxPoolSize=10 2024-12-05T19:53:46,453 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,453 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/86162e2766a8:0, corePoolSize=2, maxPoolSize=2 2024-12-05T19:53:46,453 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,467 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733428456466 2024-12-05T19:53:46,468 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T19:53:46,468 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T19:53:46,469 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T19:53:46,470 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T19:53:46,473 DEBUG [RS:0;86162e2766a8:44613 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;86162e2766a8:44613 2024-12-05T19:53:46,474 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T19:53:46,474 DEBUG [RS:1;86162e2766a8:46093 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;86162e2766a8:46093 2024-12-05T19:53:46,475 DEBUG [RS:2;86162e2766a8:35535 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;86162e2766a8:35535 2024-12-05T19:53:46,475 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T19:53:46,475 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T19:53:46,475 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T19:53:46,476 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:46,476 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T19:53:46,477 INFO [RS:0;86162e2766a8:44613 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T19:53:46,477 INFO [RS:2;86162e2766a8:35535 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T19:53:46,477 INFO [RS:1;86162e2766a8:46093 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T19:53:46,477 INFO [RS:0;86162e2766a8:44613 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T19:53:46,477 INFO [RS:1;86162e2766a8:46093 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T19:53:46,477 INFO [RS:2;86162e2766a8:35535 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T19:53:46,477 DEBUG [RS:0;86162e2766a8:44613 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T19:53:46,478 DEBUG [RS:2;86162e2766a8:35535 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T19:53:46,478 DEBUG [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-05T19:53:46,476 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,481 INFO [RS:0;86162e2766a8:44613 {}] regionserver.HRegionServer(2659): reportForDuty to master=86162e2766a8,35797,1733428424518 with port=44613, startcode=1733428425231 2024-12-05T19:53:46,481 INFO [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(2659): reportForDuty to master=86162e2766a8,35797,1733428424518 with port=46093, startcode=1733428425343 2024-12-05T19:53:46,481 INFO [RS:2;86162e2766a8:35535 {}] regionserver.HRegionServer(2659): reportForDuty to master=86162e2766a8,35797,1733428424518 with port=35535, startcode=1733428425409 2024-12-05T19:53:46,484 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T19:53:46,485 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T19:53:46,486 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T19:53:46,493 DEBUG [RS:1;86162e2766a8:46093 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T19:53:46,493 DEBUG [RS:0;86162e2766a8:44613 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T19:53:46,493 DEBUG [RS:2;86162e2766a8:35535 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T19:53:46,496 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T19:53:46,496 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T19:53:46,500 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/86162e2766a8:0:becomeActiveMaster-HFileCleaner.large.0-1733428426498,5,FailOnTimeoutGroup] 2024-12-05T19:53:46,502 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:46,503 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:46,508 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/86162e2766a8:0:becomeActiveMaster-HFileCleaner.small.0-1733428426500,5,FailOnTimeoutGroup] 2024-12-05T19:53:46,508 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-05T19:53:46,508 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T19:53:46,509 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,510 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,520 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_919299418_22 at /127.0.0.1:34256 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:40207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34256 dst: /127.0.0.1:40207 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:46,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-05T19:53:46,534 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T19:53:46,535 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T19:53:46,536 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557 2024-12-05T19:53:46,544 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56667, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T19:53:46,544 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40743, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T19:53:46,544 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58717, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T19:53:46,544 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:46,545 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-05T19:53:46,550 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35797 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 86162e2766a8,46093,1733428425343 2024-12-05T19:53:46,553 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35797 {}] master.ServerManager(517): Registering regionserver=86162e2766a8,46093,1733428425343 2024-12-05T19:53:46,558 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_919299418_22 at /127.0.0.1:51390 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:44813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51390 dst: /127.0.0.1:44813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:46,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-05T19:53:46,565 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T19:53:46,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T19:53:46,566 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35797 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 86162e2766a8,44613,1733428425231 2024-12-05T19:53:46,567 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35797 {}] master.ServerManager(517): Registering regionserver=86162e2766a8,44613,1733428425231 2024-12-05T19:53:46,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T19:53:46,571 DEBUG [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557 2024-12-05T19:53:46,571 DEBUG [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46315 2024-12-05T19:53:46,571 DEBUG [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T19:53:46,572 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35797 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 86162e2766a8,35535,1733428425409 2024-12-05T19:53:46,573 DEBUG [RS:0;86162e2766a8:44613 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557 2024-12-05T19:53:46,573 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T19:53:46,573 DEBUG [RS:0;86162e2766a8:44613 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46315 2024-12-05T19:53:46,573 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35797 {}] master.ServerManager(517): Registering regionserver=86162e2766a8,35535,1733428425409 2024-12-05T19:53:46,573 DEBUG [RS:0;86162e2766a8:44613 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T19:53:46,573 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:46,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 
2024-12-05T19:53:46,577 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:46,577 DEBUG [RS:1;86162e2766a8:46093 {}] zookeeper.ZKUtil(111): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/86162e2766a8,46093,1733428425343 2024-12-05T19:53:46,577 WARN [RS:1;86162e2766a8:46093 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T19:53:46,577 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T19:53:46,577 INFO [RS:1;86162e2766a8:46093 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T19:53:46,577 DEBUG [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,46093,1733428425343 2024-12-05T19:53:46,579 DEBUG [RS:2;86162e2766a8:35535 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557 2024-12-05T19:53:46,579 DEBUG [RS:2;86162e2766a8:35535 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46315 2024-12-05T19:53:46,579 DEBUG [RS:2;86162e2766a8:35535 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T19:53:46,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T19:53:46,581 DEBUG [RS:0;86162e2766a8:44613 {}] zookeeper.ZKUtil(111): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/86162e2766a8,44613,1733428425231 2024-12-05T19:53:46,581 WARN [RS:0;86162e2766a8:44613 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-05T19:53:46,582 INFO [RS:0;86162e2766a8:44613 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T19:53:46,582 DEBUG [RS:0;86162e2766a8:44613 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,44613,1733428425231 2024-12-05T19:53:46,582 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [86162e2766a8,46093,1733428425343] 2024-12-05T19:53:46,582 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [86162e2766a8,44613,1733428425231] 2024-12-05T19:53:46,584 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T19:53:46,584 DEBUG [RS:2;86162e2766a8:35535 {}] zookeeper.ZKUtil(111): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/86162e2766a8,35535,1733428425409 2024-12-05T19:53:46,585 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [86162e2766a8,35535,1733428425409] 2024-12-05T19:53:46,585 WARN [RS:2;86162e2766a8:35535 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-05T19:53:46,585 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:46,585 INFO [RS:2;86162e2766a8:35535 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T19:53:46,585 DEBUG [RS:2;86162e2766a8:35535 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,35535,1733428425409 2024-12-05T19:53:46,586 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:46,586 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T19:53:46,589 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T19:53:46,589 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:46,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:46,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T19:53:46,595 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T19:53:46,595 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:46,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:46,596 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T19:53:46,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740 2024-12-05T19:53:46,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740 2024-12-05T19:53:46,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T19:53:46,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T19:53:46,603 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T19:53:46,605 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T19:53:46,613 INFO [RS:1;86162e2766a8:46093 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T19:53:46,613 INFO [RS:0;86162e2766a8:44613 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T19:53:46,613 INFO [RS:2;86162e2766a8:35535 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T19:53:46,622 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T19:53:46,623 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68914759, jitterRate=0.026909932494163513}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T19:53:46,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733428426566Initializing all the Stores at 1733428426568 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428426568Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE 
=> 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428426569 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733428426569Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428426570 (+1 ms)Cleaning up temporary data from old regions at 1733428426602 (+32 ms)Region opened successfully at 1733428426626 (+24 ms) 2024-12-05T19:53:46,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T19:53:46,626 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T19:53:46,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T19:53:46,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T19:53:46,627 INFO [RS:0;86162e2766a8:44613 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T19:53:46,628 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T19:53:46,631 INFO [RS:1;86162e2766a8:46093 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T19:53:46,632 INFO [RS:2;86162e2766a8:35535 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T19:53:46,632 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T19:53:46,632 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733428426626Disabling compacts and flushes for region at 1733428426626Disabling writes for close at 1733428426628 (+2 ms)Writing region close event to WAL at 1733428426631 (+3 ms)Closed at 1733428426632 (+1 ms) 2024-12-05T19:53:46,637 INFO [RS:0;86162e2766a8:44613 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T19:53:46,637 INFO [RS:2;86162e2766a8:35535 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T19:53:46,637 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T19:53:46,637 INFO [RS:1;86162e2766a8:46093 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T19:53:46,637 
INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T19:53:46,637 INFO [RS:0;86162e2766a8:44613 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,637 INFO [RS:2;86162e2766a8:35535 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,637 INFO [RS:1;86162e2766a8:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,638 INFO [RS:0;86162e2766a8:44613 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T19:53:46,638 INFO [RS:2;86162e2766a8:35535 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T19:53:46,638 INFO [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T19:53:46,644 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T19:53:46,646 INFO [RS:1;86162e2766a8:46093 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T19:53:46,646 INFO [RS:2;86162e2766a8:35535 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T19:53:46,647 INFO [RS:0;86162e2766a8:44613 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T19:53:46,648 INFO [RS:1;86162e2766a8:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,648 INFO [RS:0;86162e2766a8:44613 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,648 INFO [RS:2;86162e2766a8:35535 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-05T19:53:46,649 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,649 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,649 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,649 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,649 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,649 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,649 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,649 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,649 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,649 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,649 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,649 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,649 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,649 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/86162e2766a8:0, corePoolSize=2, maxPoolSize=2 2024-12-05T19:53:46,650 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,650 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,650 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/86162e2766a8:0, corePoolSize=2, maxPoolSize=2 2024-12-05T19:53:46,650 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 
2024-12-05T19:53:46,650 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,650 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,650 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/86162e2766a8:0, corePoolSize=2, maxPoolSize=2 2024-12-05T19:53:46,650 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,650 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,650 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,650 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,650 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,650 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,651 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,651 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,651 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,651 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/86162e2766a8:0, corePoolSize=3, maxPoolSize=3 2024-12-05T19:53:46,651 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,651 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,651 DEBUG [RS:1;86162e2766a8:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0, corePoolSize=3, maxPoolSize=3 2024-12-05T19:53:46,651 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,651 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): 
Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,651 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,651 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:46,651 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/86162e2766a8:0, corePoolSize=3, maxPoolSize=3 2024-12-05T19:53:46,652 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/86162e2766a8:0, corePoolSize=3, maxPoolSize=3 2024-12-05T19:53:46,652 DEBUG [RS:0;86162e2766a8:44613 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0, corePoolSize=3, maxPoolSize=3 2024-12-05T19:53:46,652 DEBUG [RS:2;86162e2766a8:35535 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0, corePoolSize=3, maxPoolSize=3 2024-12-05T19:53:46,655 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T19:53:46,659 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T19:53:46,664 INFO [RS:1;86162e2766a8:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,664 INFO [RS:1;86162e2766a8:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,664 INFO [RS:1;86162e2766a8:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,664 INFO [RS:1;86162e2766a8:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,665 INFO [RS:1;86162e2766a8:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,665 INFO [RS:1;86162e2766a8:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,46093,1733428425343-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T19:53:46,666 INFO [RS:0;86162e2766a8:44613 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,666 INFO [RS:2;86162e2766a8:35535 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,667 INFO [RS:2;86162e2766a8:35535 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-12-05T19:53:46,667 INFO [RS:2;86162e2766a8:35535 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,667 INFO [RS:2;86162e2766a8:35535 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,667 INFO [RS:0;86162e2766a8:44613 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,667 INFO [RS:2;86162e2766a8:35535 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,667 INFO [RS:0;86162e2766a8:44613 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,667 INFO [RS:2;86162e2766a8:35535 {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,35535,1733428425409-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T19:53:46,667 INFO [RS:0;86162e2766a8:44613 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,667 INFO [RS:0;86162e2766a8:44613 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,667 INFO [RS:0;86162e2766a8:44613 {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,44613,1733428425231-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T19:53:46,698 INFO [RS:2;86162e2766a8:35535 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T19:53:46,698 INFO [RS:1;86162e2766a8:46093 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T19:53:46,698 INFO [RS:0;86162e2766a8:44613 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T19:53:46,701 INFO [RS:2;86162e2766a8:35535 {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,35535,1733428425409-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,701 INFO [RS:0;86162e2766a8:44613 {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,44613,1733428425231-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,701 INFO [RS:1;86162e2766a8:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,46093,1733428425343-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,701 INFO [RS:2;86162e2766a8:35535 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,701 INFO [RS:1;86162e2766a8:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,701 INFO [RS:2;86162e2766a8:35535 {}] regionserver.Replication(171): 86162e2766a8,35535,1733428425409 started 2024-12-05T19:53:46,701 INFO [RS:1;86162e2766a8:46093 {}] regionserver.Replication(171): 86162e2766a8,46093,1733428425343 started 2024-12-05T19:53:46,701 INFO [RS:0;86162e2766a8:44613 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T19:53:46,702 INFO [RS:0;86162e2766a8:44613 {}] regionserver.Replication(171): 86162e2766a8,44613,1733428425231 started 2024-12-05T19:53:46,731 INFO [RS:2;86162e2766a8:35535 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,731 INFO [RS:0;86162e2766a8:44613 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,731 INFO [RS:1;86162e2766a8:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:46,731 INFO [RS:2;86162e2766a8:35535 {}] regionserver.HRegionServer(1482): Serving as 86162e2766a8,35535,1733428425409, RpcServer on 86162e2766a8/172.17.0.2:35535, sessionid=0x10063be64fe0003 2024-12-05T19:53:46,731 INFO [RS:0;86162e2766a8:44613 {}] regionserver.HRegionServer(1482): Serving as 86162e2766a8,44613,1733428425231, RpcServer on 86162e2766a8/172.17.0.2:44613, sessionid=0x10063be64fe0001 2024-12-05T19:53:46,731 INFO [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(1482): Serving as 86162e2766a8,46093,1733428425343, RpcServer on 86162e2766a8/172.17.0.2:46093, sessionid=0x10063be64fe0002 2024-12-05T19:53:46,732 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T19:53:46,732 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T19:53:46,732 DEBUG [RS:1;86162e2766a8:46093 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 86162e2766a8,46093,1733428425343 2024-12-05T19:53:46,732 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T19:53:46,732 DEBUG [RS:2;86162e2766a8:35535 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 86162e2766a8,35535,1733428425409 2024-12-05T19:53:46,733 DEBUG [RS:0;86162e2766a8:44613 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 86162e2766a8,44613,1733428425231 2024-12-05T19:53:46,733 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '86162e2766a8,46093,1733428425343' 2024-12-05T19:53:46,733 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '86162e2766a8,44613,1733428425231' 2024-12-05T19:53:46,733 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '86162e2766a8,35535,1733428425409' 2024-12-05T19:53:46,733 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T19:53:46,733 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T19:53:46,733 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T19:53:46,734 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T19:53:46,734 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.ZKProcedureMemberRpcs(154): Looking for 
new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T19:53:46,734 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T19:53:46,735 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T19:53:46,735 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T19:53:46,735 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T19:53:46,735 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T19:53:46,735 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T19:53:46,735 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T19:53:46,735 DEBUG [RS:1;86162e2766a8:46093 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 86162e2766a8,46093,1733428425343 2024-12-05T19:53:46,735 DEBUG [RS:0;86162e2766a8:44613 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 86162e2766a8,44613,1733428425231 2024-12-05T19:53:46,735 DEBUG [RS:2;86162e2766a8:35535 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 86162e2766a8,35535,1733428425409 2024-12-05T19:53:46,735 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '86162e2766a8,46093,1733428425343' 2024-12-05T19:53:46,735 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '86162e2766a8,35535,1733428425409' 2024-12-05T19:53:46,735 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '86162e2766a8,44613,1733428425231' 2024-12-05T19:53:46,735 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T19:53:46,735 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T19:53:46,735 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T19:53:46,736 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T19:53:46,736 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T19:53:46,736 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T19:53:46,737 DEBUG [RS:2;86162e2766a8:35535 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T19:53:46,737 INFO [RS:2;86162e2766a8:35535 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T19:53:46,737 INFO [RS:2;86162e2766a8:35535 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
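Note: the flush-table-proc and online-snapshot procedure members above register themselves under the /hbase/flush-table-proc and /hbase/online-snapshot znodes and watch the acquired/abort children. A small, hypothetical ZooKeeper client sketch that lists those children; the quorum string is the one appearing in this log, so adjust it for a real cluster:

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ProcZNodeSketch {
        public static void main(String[] args) throws Exception {
            // quorum taken from the log above (127.0.0.1:60123)
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60123", 30_000, event -> { });
            for (String base : List.of("/hbase/flush-table-proc", "/hbase/online-snapshot")) {
                System.out.println(base + "/acquired -> " + zk.getChildren(base + "/acquired", false));
                System.out.println(base + "/abort    -> " + zk.getChildren(base + "/abort", false));
            }
            zk.close();
        }
    }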
2024-12-05T19:53:46,737 DEBUG [RS:0;86162e2766a8:44613 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T19:53:46,737 INFO [RS:0;86162e2766a8:44613 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T19:53:46,737 INFO [RS:0;86162e2766a8:44613 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T19:53:46,740 DEBUG [RS:1;86162e2766a8:46093 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T19:53:46,740 INFO [RS:1;86162e2766a8:46093 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T19:53:46,740 INFO [RS:1;86162e2766a8:46093 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T19:53:46,809 WARN [86162e2766a8:35797 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-05T19:53:46,844 INFO [RS:1;86162e2766a8:46093 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T19:53:46,844 INFO [RS:0;86162e2766a8:44613 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T19:53:46,844 INFO [RS:2;86162e2766a8:35535 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T19:53:46,848 INFO [RS:1;86162e2766a8:46093 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=86162e2766a8%2C46093%2C1733428425343, suffix=, logDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,46093,1733428425343, archiveDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/oldWALs, maxLogs=32 2024-12-05T19:53:46,848 INFO [RS:0;86162e2766a8:44613 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=86162e2766a8%2C44613%2C1733428425231, suffix=, logDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,44613,1733428425231, archiveDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/oldWALs, maxLogs=32 2024-12-05T19:53:46,848 INFO [RS:2;86162e2766a8:35535 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=86162e2766a8%2C35535%2C1733428425409, suffix=, logDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,35535,1733428425409, archiveDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/oldWALs, maxLogs=32 2024-12-05T19:53:46,868 DEBUG [RS:2;86162e2766a8:35535 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,35535,1733428425409/86162e2766a8%2C35535%2C1733428425409.1733428426853, exclude list is [], retry=0 2024-12-05T19:53:46,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37605,DS-5149af7e-c5ad-44aa-827f-2681ec8ad3f1,DISK] 2024-12-05T19:53:46,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, 
datanodeId = DatanodeInfoWithStorage[127.0.0.1:40207,DS-f41606fc-e486-4adc-b1c2-ef9ee721c920,DISK] 2024-12-05T19:53:46,874 DEBUG [RS:1;86162e2766a8:46093 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,46093,1733428425343/86162e2766a8%2C46093%2C1733428425343.1733428426853, exclude list is [], retry=0 2024-12-05T19:53:46,874 DEBUG [RS:0;86162e2766a8:44613 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,44613,1733428425231/86162e2766a8%2C44613%2C1733428425231.1733428426853, exclude list is [], retry=0 2024-12-05T19:53:46,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44813,DS-2cff1dd6-4cc4-4fde-8aea-38e4792115d1,DISK] 2024-12-05T19:53:46,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37605,DS-5149af7e-c5ad-44aa-827f-2681ec8ad3f1,DISK] 2024-12-05T19:53:46,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40207,DS-f41606fc-e486-4adc-b1c2-ef9ee721c920,DISK] 2024-12-05T19:53:46,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44813,DS-2cff1dd6-4cc4-4fde-8aea-38e4792115d1,DISK] 2024-12-05T19:53:46,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44813,DS-2cff1dd6-4cc4-4fde-8aea-38e4792115d1,DISK] 2024-12-05T19:53:46,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37605,DS-5149af7e-c5ad-44aa-827f-2681ec8ad3f1,DISK] 2024-12-05T19:53:46,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40207,DS-f41606fc-e486-4adc-b1c2-ef9ee721c920,DISK] 2024-12-05T19:53:46,922 INFO [RS:2;86162e2766a8:35535 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,35535,1733428425409/86162e2766a8%2C35535%2C1733428425409.1733428426853 2024-12-05T19:53:46,923 DEBUG [RS:2;86162e2766a8:35535 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42277:42277),(127.0.0.1/127.0.0.1:39295:39295),(127.0.0.1/127.0.0.1:44963:44963)] 2024-12-05T19:53:46,924 INFO [RS:0;86162e2766a8:44613 {}] 
wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,44613,1733428425231/86162e2766a8%2C44613%2C1733428425231.1733428426853 2024-12-05T19:53:46,924 DEBUG [RS:0;86162e2766a8:44613 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44963:44963),(127.0.0.1/127.0.0.1:42277:42277),(127.0.0.1/127.0.0.1:39295:39295)] 2024-12-05T19:53:46,927 INFO [RS:1;86162e2766a8:46093 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,46093,1733428425343/86162e2766a8%2C46093%2C1733428425343.1733428426853 2024-12-05T19:53:46,927 DEBUG [RS:1;86162e2766a8:46093 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42277:42277),(127.0.0.1/127.0.0.1:44963:44963),(127.0.0.1/127.0.0.1:39295:39295)] 2024-12-05T19:53:47,062 DEBUG [86162e2766a8:35797 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-05T19:53:47,070 DEBUG [86162e2766a8:35797 {}] balancer.BalancerClusterState(204): Hosts are {86162e2766a8=0} racks are {/default-rack=0} 2024-12-05T19:53:47,077 DEBUG [86162e2766a8:35797 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T19:53:47,077 DEBUG [86162e2766a8:35797 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T19:53:47,077 DEBUG [86162e2766a8:35797 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T19:53:47,077 DEBUG [86162e2766a8:35797 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T19:53:47,077 DEBUG [86162e2766a8:35797 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T19:53:47,077 DEBUG [86162e2766a8:35797 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T19:53:47,077 INFO [86162e2766a8:35797 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T19:53:47,077 INFO [86162e2766a8:35797 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T19:53:47,077 INFO [86162e2766a8:35797 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T19:53:47,077 DEBUG [86162e2766a8:35797 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T19:53:47,086 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=86162e2766a8,46093,1733428425343 2024-12-05T19:53:47,093 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 86162e2766a8,46093,1733428425343, state=OPENING 2024-12-05T19:53:47,099 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T19:53:47,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:47,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:47,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:47,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:47,101 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:47,101 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:47,101 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:47,102 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:47,103 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T19:53:47,106 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=86162e2766a8,46093,1733428425343}] 2024-12-05T19:53:47,283 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T19:53:47,286 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52527, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T19:53:47,299 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T19:53:47,300 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T19:53:47,300 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-05T19:53:47,303 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=86162e2766a8%2C46093%2C1733428425343.meta, suffix=.meta, logDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,46093,1733428425343, archiveDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/oldWALs, maxLogs=32 2024-12-05T19:53:47,320 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,46093,1733428425343/86162e2766a8%2C46093%2C1733428425343.meta.1733428427305.meta, exclude list is [], retry=0 2024-12-05T19:53:47,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:37605,DS-5149af7e-c5ad-44aa-827f-2681ec8ad3f1,DISK] 2024-12-05T19:53:47,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44813,DS-2cff1dd6-4cc4-4fde-8aea-38e4792115d1,DISK] 2024-12-05T19:53:47,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40207,DS-f41606fc-e486-4adc-b1c2-ef9ee721c920,DISK] 2024-12-05T19:53:47,328 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/WALs/86162e2766a8,46093,1733428425343/86162e2766a8%2C46093%2C1733428425343.meta.1733428427305.meta 2024-12-05T19:53:47,328 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42277:42277),(127.0.0.1/127.0.0.1:44963:44963),(127.0.0.1/127.0.0.1:39295:39295)] 2024-12-05T19:53:47,328 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T19:53:47,330 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T19:53:47,333 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T19:53:47,338 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-05T19:53:47,343 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T19:53:47,343 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T19:53:47,343 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T19:53:47,343 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T19:53:47,347 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T19:53:47,349 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T19:53:47,349 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:47,350 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:47,350 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T19:53:47,352 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T19:53:47,352 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:47,353 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:47,354 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T19:53:47,355 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T19:53:47,356 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:47,357 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:47,357 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T19:53:47,358 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T19:53:47,359 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:47,359 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
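Note: the CompactionConfiguration lines above print the effective compaction knobs for each column family of hbase:meta (minCompactSize 128 MB, files [3, 10), ratio 1.2, off-peak ratio 5.0). A hedged sketch of setting the corresponding configuration properties programmatically; the property names are assumptions to verify against the HBase reference guide, not values read from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed property names for the values printed above
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
            conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
            System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 0f));
        }
    }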
2024-12-05T19:53:47,360 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T19:53:47,361 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740 2024-12-05T19:53:47,365 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740 2024-12-05T19:53:47,367 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T19:53:47,367 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T19:53:47,368 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T19:53:47,371 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T19:53:47,373 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60872677, jitterRate=-0.09292642772197723}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T19:53:47,373 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T19:53:47,375 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733428427344Writing region info on filesystem at 1733428427344Initializing all the Stores at 1733428427346 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428427346Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428427347 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733428427347Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428427347Cleaning up temporary data from old regions at 1733428427367 (+20 ms)Running coprocessor post-open hooks at 1733428427373 (+6 ms)Region opened successfully at 1733428427375 (+2 ms) 2024-12-05T19:53:47,386 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733428427274 2024-12-05T19:53:47,402 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T19:53:47,403 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T19:53:47,405 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=86162e2766a8,46093,1733428425343 2024-12-05T19:53:47,407 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 86162e2766a8,46093,1733428425343, state=OPEN 2024-12-05T19:53:47,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T19:53:47,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T19:53:47,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T19:53:47,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T19:53:47,410 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:47,410 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:47,410 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:47,410 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:47,410 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=86162e2766a8,46093,1733428425343 2024-12-05T19:53:47,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T19:53:47,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=86162e2766a8,46093,1733428425343 in 305 msec 2024-12-05T19:53:47,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T19:53:47,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 774 msec 2024-12-05T19:53:47,424 DEBUG [PEWorker-3 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T19:53:47,424 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T19:53:47,450 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T19:53:47,451 DEBUG [PEWorker-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=86162e2766a8,46093,1733428425343, seqNum=-1] 2024-12-05T19:53:47,474 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T19:53:47,477 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32859, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T19:53:47,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1560 sec 2024-12-05T19:53:47,526 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733428427525, completionTime=-1 2024-12-05T19:53:47,530 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-05T19:53:47,530 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-05T19:53:47,575 INFO [master/86162e2766a8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-05T19:53:47,575 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733428487575 2024-12-05T19:53:47,575 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733428547575 2024-12-05T19:53:47,575 INFO [master/86162e2766a8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 44 msec 2024-12-05T19:53:47,577 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-05T19:53:47,583 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,35797,1733428424518-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:47,584 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,35797,1733428424518-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:47,584 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,35797,1733428424518-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:47,585 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-86162e2766a8:35797, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:47,586 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:47,589 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:47,594 DEBUG [master/86162e2766a8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T19:53:47,619 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.122sec 2024-12-05T19:53:47,622 INFO [master/86162e2766a8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T19:53:47,623 INFO [master/86162e2766a8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T19:53:47,624 INFO [master/86162e2766a8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T19:53:47,625 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-05T19:53:47,625 INFO [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T19:53:47,626 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,35797,1733428424518-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T19:53:47,627 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,35797,1733428424518-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T19:53:47,631 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T19:53:47,632 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T19:53:47,632 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,35797,1733428424518-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:47,645 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@418871fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T19:53:47,650 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-05T19:53:47,650 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-05T19:53:47,654 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 86162e2766a8,35797,-1 for getting cluster id 2024-12-05T19:53:47,656 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T19:53:47,666 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '978dde15-16c2-4d9f-86a7-2daab0b203f1' 2024-12-05T19:53:47,669 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T19:53:47,669 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "978dde15-16c2-4d9f-86a7-2daab0b203f1" 2024-12-05T19:53:47,670 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53d7b575, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T19:53:47,670 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [86162e2766a8,35797,-1] 2024-12-05T19:53:47,672 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T19:53:47,674 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:47,675 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57934, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-05T19:53:47,678 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f46cc90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T19:53:47,679 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T19:53:47,687 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=86162e2766a8,46093,1733428425343, seqNum=-1] 2024-12-05T19:53:47,688 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T19:53:47,690 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52996, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T19:53:47,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=86162e2766a8,35797,1733428424518 2024-12-05T19:53:47,725 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T19:53:47,732 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 86162e2766a8,35797,1733428424518 2024-12-05T19:53:47,735 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6bc7313f 2024-12-05T19:53:47,736 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T19:53:47,738 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57948, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T19:53:47,746 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T19:53:47,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-05T19:53:47,760 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T19:53:47,763 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-05T19:53:47,764 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:47,767 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T19:53:47,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T19:53:47,782 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:47,782 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:47,791 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_919299418_22 at /127.0.0.1:34296 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:40207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34296 dst: /127.0.0.1:40207 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:47,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-05T19:53:47,805 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T19:53:47,809 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c30b1e06aa6300b8b120f7b00d0d9d44, NAME => 'TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557 2024-12-05T19:53:47,829 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:47,829 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:47,842 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_919299418_22 at /127.0.0.1:34988 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:37605:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34988 dst: /127.0.0.1:37605 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:47,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-05T19:53:47,856 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
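Note: the DFSStripedOutputStream warnings above occur because the RS-3-2-1024k policy needs five distinct datanodes (3 data + 2 parity) while this mini-cluster runs only three, so parity blocks 3 and 4 cannot be placed; the log itself points at 'hdfs ec -verifyClusterSetup' for checking this. A hypothetical Java sketch that inspects which erasure coding policy, if any, applies to a directory via the public DistributedFileSystem API; the NameNode URI comes from this log and the path is illustrative:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // NameNode address from the log above; the path below is only an example prefix
            try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46315"), conf)) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;
                Path dir = new Path("/user/jenkins/test-data");
                ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
                // null means the directory uses plain replication rather than an EC policy
                System.out.println(dir + " -> " + (policy == null ? "replication" : policy.getName()));
            }
        }
    }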
2024-12-05T19:53:47,856 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T19:53:47,857 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing c30b1e06aa6300b8b120f7b00d0d9d44, disabling compactions & flushes 2024-12-05T19:53:47,857 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 2024-12-05T19:53:47,857 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 2024-12-05T19:53:47,857 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. after waiting 0 ms 2024-12-05T19:53:47,857 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 2024-12-05T19:53:47,857 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 2024-12-05T19:53:47,857 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for c30b1e06aa6300b8b120f7b00d0d9d44: Waiting for close lock at 1733428427856Disabling compacts and flushes for region at 1733428427856Disabling writes for close at 1733428427857 (+1 ms)Writing region close event to WAL at 1733428427857Closed at 1733428427857 2024-12-05T19:53:47,860 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T19:53:47,868 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733428427861"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733428427861"}]},"ts":"1733428427861"} 2024-12-05T19:53:47,875 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-05T19:53:47,878 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T19:53:47,882 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733428427878"}]},"ts":"1733428427878"} 2024-12-05T19:53:47,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T19:53:47,889 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-05T19:53:47,891 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {86162e2766a8=0} racks are {/default-rack=0} 2024-12-05T19:53:47,894 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T19:53:47,894 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T19:53:47,894 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T19:53:47,894 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T19:53:47,894 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T19:53:47,894 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T19:53:47,894 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T19:53:47,894 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T19:53:47,894 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T19:53:47,894 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T19:53:47,896 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c30b1e06aa6300b8b120f7b00d0d9d44, ASSIGN}] 2024-12-05T19:53:47,900 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c30b1e06aa6300b8b120f7b00d0d9d44, ASSIGN 2024-12-05T19:53:47,903 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c30b1e06aa6300b8b120f7b00d0d9d44, ASSIGN; state=OFFLINE, location=86162e2766a8,46093,1733428425343; forceNewPlan=false, retain=false 2024-12-05T19:53:48,056 INFO [86162e2766a8:35797 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-05T19:53:48,057 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c30b1e06aa6300b8b120f7b00d0d9d44, regionState=OPENING, regionLocation=86162e2766a8,46093,1733428425343 2024-12-05T19:53:48,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c30b1e06aa6300b8b120f7b00d0d9d44, ASSIGN because future has completed 2024-12-05T19:53:48,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c30b1e06aa6300b8b120f7b00d0d9d44, server=86162e2766a8,46093,1733428425343}] 2024-12-05T19:53:48,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T19:53:48,225 INFO [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 2024-12-05T19:53:48,226 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c30b1e06aa6300b8b120f7b00d0d9d44, NAME => 'TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44.', STARTKEY => '', ENDKEY => ''} 2024-12-05T19:53:48,226 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC c30b1e06aa6300b8b120f7b00d0d9d44 2024-12-05T19:53:48,226 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T19:53:48,226 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c30b1e06aa6300b8b120f7b00d0d9d44 2024-12-05T19:53:48,226 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c30b1e06aa6300b8b120f7b00d0d9d44 2024-12-05T19:53:48,229 INFO [StoreOpener-c30b1e06aa6300b8b120f7b00d0d9d44-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c30b1e06aa6300b8b120f7b00d0d9d44 2024-12-05T19:53:48,231 INFO [StoreOpener-c30b1e06aa6300b8b120f7b00d0d9d44-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c30b1e06aa6300b8b120f7b00d0d9d44 columnFamilyName cf 2024-12-05T19:53:48,231 DEBUG [StoreOpener-c30b1e06aa6300b8b120f7b00d0d9d44-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:48,232 INFO [StoreOpener-c30b1e06aa6300b8b120f7b00d0d9d44-1 {}] regionserver.HStore(327): Store=c30b1e06aa6300b8b120f7b00d0d9d44/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T19:53:48,232 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c30b1e06aa6300b8b120f7b00d0d9d44 2024-12-05T19:53:48,233 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/default/TestHBaseWalOnEC/c30b1e06aa6300b8b120f7b00d0d9d44 2024-12-05T19:53:48,234 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/default/TestHBaseWalOnEC/c30b1e06aa6300b8b120f7b00d0d9d44 2024-12-05T19:53:48,235 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c30b1e06aa6300b8b120f7b00d0d9d44 2024-12-05T19:53:48,235 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c30b1e06aa6300b8b120f7b00d0d9d44 2024-12-05T19:53:48,237 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c30b1e06aa6300b8b120f7b00d0d9d44 2024-12-05T19:53:48,242 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/default/TestHBaseWalOnEC/c30b1e06aa6300b8b120f7b00d0d9d44/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T19:53:48,243 INFO [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c30b1e06aa6300b8b120f7b00d0d9d44; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69752403, jitterRate=0.03939180076122284}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T19:53:48,243 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c30b1e06aa6300b8b120f7b00d0d9d44 2024-12-05T19:53:48,245 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c30b1e06aa6300b8b120f7b00d0d9d44: Running coprocessor pre-open hook at 1733428428227Writing region info on filesystem at 1733428428227Initializing all the Stores at 1733428428228 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733428428228Cleaning up temporary data from old regions at 1733428428235 (+7 ms)Running coprocessor post-open hooks at 1733428428244 (+9 ms)Region opened successfully at 1733428428245 (+1 ms) 2024-12-05T19:53:48,247 INFO [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44., pid=6, masterSystemTime=1733428428218 2024-12-05T19:53:48,251 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 2024-12-05T19:53:48,251 INFO [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 2024-12-05T19:53:48,252 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c30b1e06aa6300b8b120f7b00d0d9d44, regionState=OPEN, openSeqNum=2, regionLocation=86162e2766a8,46093,1733428425343 2024-12-05T19:53:48,256 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c30b1e06aa6300b8b120f7b00d0d9d44, server=86162e2766a8,46093,1733428425343 because future has completed 2024-12-05T19:53:48,261 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T19:53:48,262 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c30b1e06aa6300b8b120f7b00d0d9d44, server=86162e2766a8,46093,1733428425343 in 194 msec 2024-12-05T19:53:48,266 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T19:53:48,266 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c30b1e06aa6300b8b120f7b00d0d9d44, ASSIGN in 366 msec 2024-12-05T19:53:48,267 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T19:53:48,267 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733428428267"}]},"ts":"1733428428267"} 2024-12-05T19:53:48,270 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-05T19:53:48,272 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T19:53:48,275 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 522 msec 
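By this point the create procedure has walked through WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE and POST_OPERATION, and hbase:meta records the table as ENABLED. A small hedged sketch of confirming that state from a client, assuming default connection settings and an illustrative class name:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CheckTableState {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Reflects the ENABLED state recorded in hbase:meta above.
          System.out.println("exists=" + admin.tableExists(name)
              + ", enabled=" + admin.isTableEnabled(name));
        }
      }
    }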
2024-12-05T19:53:48,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T19:53:48,406 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T19:53:48,406 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-05T19:53:48,407 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T19:53:48,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-05T19:53:48,413 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T19:53:48,414 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-05T19:53:48,422 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44., hostname=86162e2766a8,46093,1733428425343, seqNum=2] 2024-12-05T19:53:48,432 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-05T19:53:48,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-05T19:53:48,439 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-05T19:53:48,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T19:53:48,441 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-05T19:53:48,442 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-05T19:53:48,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T19:53:48,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46093 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-05T19:53:48,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 
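The region locator fetch for row='row', the flush request arriving at the master (HMaster$22 flush TestHBaseWalOnEC), and the FlushTableProcedure/FlushRegionProcedure pair (pid=7/pid=8) correspond to a simple write-then-flush on the client side. A hedged sketch of those two steps; the cell value is assumed, since the log only shows the key row/cf:cq:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlush {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          Put put = new Put(Bytes.toBytes("row"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")); // value assumed
          table.put(put);     // lands in the memstore and WAL on the hosting region server
          admin.flush(name);  // drives the FlushTableProcedure seen as pid=7/pid=8 below
        }
      }
    }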
2024-12-05T19:53:48,610 INFO [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing c30b1e06aa6300b8b120f7b00d0d9d44 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-05T19:53:48,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/default/TestHBaseWalOnEC/c30b1e06aa6300b8b120f7b00d0d9d44/.tmp/cf/595e57e14ed3490187d814259357efa1 is 36, key is row/cf:cq/1733428428424/Put/seqid=0 2024-12-05T19:53:48,674 WARN [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:48,674 WARN [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:48,678 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2070522285_22 at /127.0.0.1:34304 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:40207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34304 dst: /127.0.0.1:40207 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:48,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-05T19:53:48,685 WARN [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T19:53:48,685 INFO [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/default/TestHBaseWalOnEC/c30b1e06aa6300b8b120f7b00d0d9d44/.tmp/cf/595e57e14ed3490187d814259357efa1 2024-12-05T19:53:48,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/default/TestHBaseWalOnEC/c30b1e06aa6300b8b120f7b00d0d9d44/.tmp/cf/595e57e14ed3490187d814259357efa1 as hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/default/TestHBaseWalOnEC/c30b1e06aa6300b8b120f7b00d0d9d44/cf/595e57e14ed3490187d814259357efa1 2024-12-05T19:53:48,743 INFO [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/default/TestHBaseWalOnEC/c30b1e06aa6300b8b120f7b00d0d9d44/cf/595e57e14ed3490187d814259357efa1, entries=1, sequenceid=5, filesize=4.7 K 2024-12-05T19:53:48,752 INFO [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for c30b1e06aa6300b8b120f7b00d0d9d44 in 140ms, sequenceid=5, compaction requested=false 2024-12-05T19:53:48,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-05T19:53:48,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T19:53:48,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for c30b1e06aa6300b8b120f7b00d0d9d44: 2024-12-05T19:53:48,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 
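The flush committed .tmp/cf/595e57e14ed3490187d814259357efa1 into the column-family directory as a ~4.7 K store file holding one entry at sequenceid=5. A hedged sketch of inspecting that directory directly over HDFS, reusing the NameNode port and region path from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFiles {
      public static void main(String[] args) throws Exception {
        // Paths copied from the log; the NameNode port and test-data directory differ per run.
        Path cfDir = new Path("hdfs://localhost:46315/user/jenkins/test-data/"
            + "1a6465ef-adda-b6b8-36b8-07863dede557/data/default/TestHBaseWalOnEC/"
            + "c30b1e06aa6300b8b120f7b00d0d9d44/cf");
        try (FileSystem fs = cfDir.getFileSystem(new Configuration())) {
          for (FileStatus status : fs.listStatus(cfDir)) {
            System.out.println(status.getPath().getName() + "\t" + status.getLen() + " bytes");
          }
        }
      }
    }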
2024-12-05T19:53:48,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-05T19:53:48,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-05T19:53:48,769 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-05T19:53:48,769 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 322 msec 2024-12-05T19:53:48,773 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 337 msec 2024-12-05T19:53:49,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35797 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T19:53:49,066 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T19:53:49,083 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T19:53:49,084 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T19:53:49,084 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T19:53:49,091 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:49,091 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:49,091 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T19:53:49,092 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T19:53:49,092 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1496649153, stopped=false 2024-12-05T19:53:49,092 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=86162e2766a8,35797,1733428424518 2024-12-05T19:53:49,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:49,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:49,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:49,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:49,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:49,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, 
quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:49,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:49,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:49,096 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:49,096 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T19:53:49,096 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:49,097 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T19:53:49,097 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T19:53:49,097 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:49,097 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:49,098 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:49,098 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '86162e2766a8,44613,1733428425231' ***** 2024-12-05T19:53:49,098 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T19:53:49,098 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '86162e2766a8,46093,1733428425343' ***** 2024-12-05T19:53:49,098 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T19:53:49,098 INFO [RS:0;86162e2766a8:44613 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T19:53:49,099 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '86162e2766a8,35535,1733428425409' ***** 2024-12-05T19:53:49,099 INFO [RS:1;86162e2766a8:46093 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T19:53:49,099 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T19:53:49,099 INFO [RS:1;86162e2766a8:46093 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T19:53:49,099 INFO [RS:0;86162e2766a8:44613 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
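Both long call stacks above bottom out in TestHBaseWalOnEC.tearDown -> HBaseTestingUtil.shutdownMiniCluster: the JUnit teardown first closes the client connection, then asks the master to shut the cluster down, which is what triggers the three "***** STOPPING region server ..." blocks. A minimal sketch of that lifecycle, assuming the standard JUnit 4 testing-util pattern; the class and setup details are illustrative, not the actual test source:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycle {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        // The logged run has three region servers (RS:0..RS:2); the server count
        // is configured through the testing util's cluster options.
        UTIL.startMiniCluster();
      }

      @AfterClass
      public static void tearDown() throws Exception {
        // Produces the "Shutting down minicluster" / "Cluster shutdown requested" sequence above.
        UTIL.shutdownMiniCluster();
      }
    }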
2024-12-05T19:53:49,099 INFO [RS:2;86162e2766a8:35535 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T19:53:49,099 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T19:53:49,099 INFO [RS:0;86162e2766a8:44613 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T19:53:49,099 INFO [RS:1;86162e2766a8:46093 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T19:53:49,099 INFO [RS:2;86162e2766a8:35535 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T19:53:49,099 INFO [RS:2;86162e2766a8:35535 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T19:53:49,099 INFO [RS:0;86162e2766a8:44613 {}] regionserver.HRegionServer(959): stopping server 86162e2766a8,44613,1733428425231 2024-12-05T19:53:49,099 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T19:53:49,099 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T19:53:49,099 INFO [RS:2;86162e2766a8:35535 {}] regionserver.HRegionServer(959): stopping server 86162e2766a8,35535,1733428425409 2024-12-05T19:53:49,099 INFO [RS:0;86162e2766a8:44613 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T19:53:49,099 INFO [RS:2;86162e2766a8:35535 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T19:53:49,099 INFO [RS:0;86162e2766a8:44613 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;86162e2766a8:44613. 2024-12-05T19:53:49,099 INFO [RS:2;86162e2766a8:35535 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;86162e2766a8:35535. 
2024-12-05T19:53:49,099 INFO [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(3091): Received CLOSE for c30b1e06aa6300b8b120f7b00d0d9d44 2024-12-05T19:53:49,099 DEBUG [RS:0;86162e2766a8:44613 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T19:53:49,100 DEBUG [RS:0;86162e2766a8:44613 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:49,100 DEBUG [RS:2;86162e2766a8:35535 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T19:53:49,100 DEBUG [RS:2;86162e2766a8:35535 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:49,100 INFO [RS:0;86162e2766a8:44613 {}] regionserver.HRegionServer(976): stopping server 86162e2766a8,44613,1733428425231; all regions closed. 2024-12-05T19:53:49,100 INFO [RS:2;86162e2766a8:35535 {}] regionserver.HRegionServer(976): stopping server 86162e2766a8,35535,1733428425409; all regions closed. 
2024-12-05T19:53:49,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-05T19:53:49,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-05T19:53:49,107 INFO [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(959): stopping server 86162e2766a8,46093,1733428425343 2024-12-05T19:53:49,107 INFO [RS:1;86162e2766a8:46093 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T19:53:49,107 INFO [RS:1;86162e2766a8:46093 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;86162e2766a8:46093. 2024-12-05T19:53:49,107 DEBUG [RS:1;86162e2766a8:46093 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T19:53:49,107 DEBUG [RS:1;86162e2766a8:46093 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:49,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-05T19:53:49,107 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c30b1e06aa6300b8b120f7b00d0d9d44, disabling compactions & flushes 2024-12-05T19:53:49,107 INFO [RS:1;86162e2766a8:46093 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T19:53:49,107 INFO [RS:1;86162e2766a8:46093 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T19:53:49,107 INFO [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 2024-12-05T19:53:49,107 INFO [RS:1;86162e2766a8:46093 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-05T19:53:49,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-05T19:53:49,108 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 2024-12-05T19:53:49,108 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. after waiting 0 ms 2024-12-05T19:53:49,108 INFO [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T19:53:49,108 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 2024-12-05T19:53:49,113 INFO [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-05T19:53:49,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-05T19:53:49,114 DEBUG [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(1325): Online Regions={c30b1e06aa6300b8b120f7b00d0d9d44=TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44., 1588230740=hbase:meta,,1.1588230740} 2024-12-05T19:53:49,114 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T19:53:49,114 DEBUG [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c30b1e06aa6300b8b120f7b00d0d9d44 2024-12-05T19:53:49,114 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T19:53:49,114 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T19:53:49,114 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T19:53:49,114 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T19:53:49,115 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-05T19:53:49,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-05T19:53:49,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_1073741826_1016 (size=93) 2024-12-05T19:53:49,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741826_1016 (size=93) 2024-12-05T19:53:49,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to 
blk_1073741826_1016 (size=93) 2024-12-05T19:53:49,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741827_1017 (size=93) 2024-12-05T19:53:49,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-05T19:53:49,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741827_1017 (size=93) 2024-12-05T19:53:49,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_1073741827_1017 (size=93) 2024-12-05T19:53:49,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-05T19:53:49,132 DEBUG [RS:2;86162e2766a8:35535 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/oldWALs 2024-12-05T19:53:49,132 INFO [RS:2;86162e2766a8:35535 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 86162e2766a8%2C35535%2C1733428425409:(num 1733428426853) 2024-12-05T19:53:49,132 DEBUG [RS:2;86162e2766a8:35535 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:49,132 INFO [RS:2;86162e2766a8:35535 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T19:53:49,133 INFO [RS:2;86162e2766a8:35535 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T19:53:49,133 INFO [RS:2;86162e2766a8:35535 {}] hbase.ChoreService(370): Chore service for: regionserver/86162e2766a8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T19:53:49,133 INFO [RS:2;86162e2766a8:35535 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T19:53:49,134 INFO [RS:2;86162e2766a8:35535 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T19:53:49,134 INFO [regionserver/86162e2766a8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T19:53:49,134 INFO [RS:2;86162e2766a8:35535 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-05T19:53:49,134 INFO [RS:2;86162e2766a8:35535 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T19:53:49,134 INFO [RS:2;86162e2766a8:35535 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35535 2024-12-05T19:53:49,135 DEBUG [RS:0;86162e2766a8:44613 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/oldWALs 2024-12-05T19:53:49,135 INFO [RS:0;86162e2766a8:44613 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 86162e2766a8%2C44613%2C1733428425231:(num 1733428426853) 2024-12-05T19:53:49,135 DEBUG [RS:0;86162e2766a8:44613 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:49,135 INFO [RS:0;86162e2766a8:44613 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T19:53:49,135 INFO [RS:0;86162e2766a8:44613 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T19:53:49,136 INFO [RS:0;86162e2766a8:44613 {}] hbase.ChoreService(370): Chore service for: regionserver/86162e2766a8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T19:53:49,136 INFO [RS:0;86162e2766a8:44613 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T19:53:49,136 INFO [RS:0;86162e2766a8:44613 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T19:53:49,136 INFO [RS:0;86162e2766a8:44613 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T19:53:49,136 INFO [RS:0;86162e2766a8:44613 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T19:53:49,136 INFO [RS:0;86162e2766a8:44613 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44613 2024-12-05T19:53:49,136 INFO [regionserver/86162e2766a8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T19:53:49,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/86162e2766a8,44613,1733428425231 2024-12-05T19:53:49,138 INFO [RS:0;86162e2766a8:44613 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T19:53:49,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T19:53:49,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/86162e2766a8,35535,1733428425409 2024-12-05T19:53:49,139 INFO [RS:2;86162e2766a8:35535 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T19:53:49,141 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [86162e2766a8,35535,1733428425409] 2024-12-05T19:53:49,142 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/86162e2766a8,35535,1733428425409 already deleted, retry=false 2024-12-05T19:53:49,143 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 86162e2766a8,35535,1733428425409 expired; onlineServers=2 2024-12-05T19:53:49,143 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [86162e2766a8,44613,1733428425231] 2024-12-05T19:53:49,144 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/default/TestHBaseWalOnEC/c30b1e06aa6300b8b120f7b00d0d9d44/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T19:53:49,146 INFO [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 2024-12-05T19:53:49,146 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c30b1e06aa6300b8b120f7b00d0d9d44: Waiting for close lock at 1733428429107Running coprocessor pre-close hooks at 1733428429107Disabling compacts and flushes for region at 1733428429107Disabling writes for close at 1733428429108 (+1 ms)Writing region close event to WAL at 1733428429129 (+21 ms)Running coprocessor post-close hooks at 1733428429145 (+16 ms)Closed at 1733428429146 (+1 ms) 2024-12-05T19:53:49,146 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44. 
2024-12-05T19:53:49,148 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/86162e2766a8,44613,1733428425231 already deleted, retry=false 2024-12-05T19:53:49,148 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 86162e2766a8,44613,1733428425231 expired; onlineServers=1 2024-12-05T19:53:49,170 INFO [regionserver/86162e2766a8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T19:53:49,170 INFO [regionserver/86162e2766a8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T19:53:49,173 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/.tmp/info/5e4a64932a6040eda043de8be56f2baa is 153, key is TestHBaseWalOnEC,,1733428427740.c30b1e06aa6300b8b120f7b00d0d9d44./info:regioninfo/1733428428252/Put/seqid=0 2024-12-05T19:53:49,175 INFO [regionserver/86162e2766a8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T19:53:49,177 WARN [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:49,177 WARN [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:49,181 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2070522285_22 at /127.0.0.1:34328 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:40207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34328 dst: /127.0.0.1:40207 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:49,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-05T19:53:49,187 WARN [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T19:53:49,188 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/.tmp/info/5e4a64932a6040eda043de8be56f2baa 2024-12-05T19:53:49,218 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/.tmp/ns/d7e2a4edb1744de6867b6391d151cb94 is 43, key is default/ns:d/1733428427505/Put/seqid=0 2024-12-05T19:53:49,221 WARN [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:49,221 WARN [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:49,226 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2070522285_22 at /127.0.0.1:35056 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:37605:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35056 dst: /127.0.0.1:37605 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:49,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-05T19:53:49,234 WARN [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T19:53:49,234 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/.tmp/ns/d7e2a4edb1744de6867b6391d151cb94 2024-12-05T19:53:49,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-05T19:53:49,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-05T19:53:49,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:49,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35535-0x10063be64fe0003, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:49,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:49,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44613-0x10063be64fe0001, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:49,242 INFO [RS:0;86162e2766a8:44613 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T19:53:49,242 INFO [RS:2;86162e2766a8:35535 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T19:53:49,242 INFO [RS:0;86162e2766a8:44613 {}] regionserver.HRegionServer(1031): Exiting; stopping=86162e2766a8,44613,1733428425231; zookeeper connection closed. 2024-12-05T19:53:49,242 INFO [RS:2;86162e2766a8:35535 {}] regionserver.HRegionServer(1031): Exiting; stopping=86162e2766a8,35535,1733428425409; zookeeper connection closed. 
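The repeated "Cannot allocate parity block(index=3/4, policy=RS-3-2-1024k)" warnings, and the DataXceiver "Premature EOF" errors that accompany them, stem from writing RS-3-2-1024k striped files on a cluster that cannot place a full block group: the policy stripes 3 data plus 2 parity blocks, so it needs at least 5 datanodes, while this mini cluster runs only 3. A hedged sketch of the capacity check that the suggested 'hdfs ec -verifyClusterSetup' command performs, using public HDFS client APIs (the namenode address is taken from the log; the path is the test data directory and is only assumed to carry the policy):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class EcCapacitySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:46315"); // namenode address from the log
    try (DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf)) {
      int liveDataNodes = dfs.getDataNodeStats(DatanodeReportType.LIVE).length;
      ErasureCodingPolicy policy =
          dfs.getErasureCodingPolicy(new Path("/user/jenkins/test-data"));
      if (policy != null) {
        int blocksPerGroup = policy.getNumDataUnits() + policy.getNumParityUnits(); // 3 + 2
        System.out.printf("%s needs %d datanodes, cluster has %d%n",
            policy.getName(), blocksPerGroup, liveDataNodes);
      }
    }
  }
}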
2024-12-05T19:53:49,242 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@15f35ce0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@15f35ce0 2024-12-05T19:53:49,242 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@338d24d2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@338d24d2 2024-12-05T19:53:49,268 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/.tmp/table/bb11dfb6815140438a93ed137a3a5e1d is 52, key is TestHBaseWalOnEC/table:state/1733428428267/Put/seqid=0 2024-12-05T19:53:49,271 WARN [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:49,271 WARN [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:49,274 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2070522285_22 at /127.0.0.1:51480 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:44813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51480 dst: /127.0.0.1:44813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T19:53:49,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-05T19:53:49,280 WARN [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T19:53:49,280 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/.tmp/table/bb11dfb6815140438a93ed137a3a5e1d 2024-12-05T19:53:49,293 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/.tmp/info/5e4a64932a6040eda043de8be56f2baa as hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/info/5e4a64932a6040eda043de8be56f2baa 2024-12-05T19:53:49,304 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/info/5e4a64932a6040eda043de8be56f2baa, entries=10, sequenceid=11, filesize=6.5 K 2024-12-05T19:53:49,305 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/.tmp/ns/d7e2a4edb1744de6867b6391d151cb94 as hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/ns/d7e2a4edb1744de6867b6391d151cb94 2024-12-05T19:53:49,314 DEBUG [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T19:53:49,317 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/ns/d7e2a4edb1744de6867b6391d151cb94, entries=2, sequenceid=11, filesize=5.0 K 2024-12-05T19:53:49,319 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/.tmp/table/bb11dfb6815140438a93ed137a3a5e1d as hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/table/bb11dfb6815140438a93ed137a3a5e1d 2024-12-05T19:53:49,329 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/table/bb11dfb6815140438a93ed137a3a5e1d, entries=2, sequenceid=11, filesize=5.1 K 2024-12-05T19:53:49,331 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 216ms, sequenceid=11, compaction 
requested=false 2024-12-05T19:53:49,331 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T19:53:49,340 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-05T19:53:49,341 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T19:53:49,341 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T19:53:49,342 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733428429114Running coprocessor pre-close hooks at 1733428429114Disabling compacts and flushes for region at 1733428429114Disabling writes for close at 1733428429114Obtaining lock to block concurrent updates at 1733428429115 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733428429115Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733428429117 (+2 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733428429118 (+1 ms)Flushing 1588230740/info: creating writer at 1733428429118Flushing 1588230740/info: appending metadata at 1733428429168 (+50 ms)Flushing 1588230740/info: closing flushed file at 1733428429169 (+1 ms)Flushing 1588230740/ns: creating writer at 1733428429199 (+30 ms)Flushing 1588230740/ns: appending metadata at 1733428429216 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733428429216Flushing 1588230740/table: creating writer at 1733428429244 (+28 ms)Flushing 1588230740/table: appending metadata at 1733428429266 (+22 ms)Flushing 1588230740/table: closing flushed file at 1733428429266Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5bf849a4: reopening flushed file at 1733428429291 (+25 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a464215: reopening flushed file at 1733428429304 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@255c4500: reopening flushed file at 1733428429317 (+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 216ms, sequenceid=11, compaction requested=false at 1733428429331 (+14 ms)Writing region close event to WAL at 1733428429334 (+3 ms)Running coprocessor post-close hooks at 1733428429341 (+7 ms)Closed at 1733428429341 2024-12-05T19:53:49,342 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T19:53:49,514 INFO [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(976): stopping server 86162e2766a8,46093,1733428425343; all regions closed. 
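The RS_CLOSE_META thread above flushes hbase:meta before closing it: each family's memstore is written to an HFile under .tmp and then committed into data/hbase/meta/1588230740/<family>/, exactly as the "Committing ... as ..." and "Added ..., entries=..., sequenceid=11" lines show. For reference only (this is not part of the test), the same kind of flush can be requested through the public client API; a minimal sketch:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushMetaSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Writes current memstore contents of hbase:meta out as HFiles,
      // the same .tmp-then-commit sequence logged by the close path above.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}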
2024-12-05T19:53:49,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741829_1019 (size=2751) 2024-12-05T19:53:49,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_1073741829_1019 (size=2751) 2024-12-05T19:53:49,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741829_1019 (size=2751) 2024-12-05T19:53:49,522 DEBUG [RS:1;86162e2766a8:46093 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/oldWALs 2024-12-05T19:53:49,522 INFO [RS:1;86162e2766a8:46093 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 86162e2766a8%2C46093%2C1733428425343.meta:.meta(num 1733428427305) 2024-12-05T19:53:49,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741828_1018 (size=1298) 2024-12-05T19:53:49,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741828_1018 (size=1298) 2024-12-05T19:53:49,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_1073741828_1018 (size=1298) 2024-12-05T19:53:49,528 DEBUG [RS:1;86162e2766a8:46093 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/oldWALs 2024-12-05T19:53:49,528 INFO [RS:1;86162e2766a8:46093 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 86162e2766a8%2C46093%2C1733428425343:(num 1733428426853) 2024-12-05T19:53:49,528 DEBUG [RS:1;86162e2766a8:46093 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:49,528 INFO [RS:1;86162e2766a8:46093 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T19:53:49,529 INFO [RS:1;86162e2766a8:46093 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T19:53:49,529 INFO [RS:1;86162e2766a8:46093 {}] hbase.ChoreService(370): Chore service for: regionserver/86162e2766a8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T19:53:49,529 INFO [RS:1;86162e2766a8:46093 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T19:53:49,529 INFO [regionserver/86162e2766a8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T19:53:49,529 INFO [RS:1;86162e2766a8:46093 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46093 2024-12-05T19:53:49,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T19:53:49,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/86162e2766a8,46093,1733428425343 2024-12-05T19:53:49,531 INFO [RS:1;86162e2766a8:46093 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T19:53:49,533 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [86162e2766a8,46093,1733428425343] 2024-12-05T19:53:49,534 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/86162e2766a8,46093,1733428425343 already deleted, retry=false 2024-12-05T19:53:49,534 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 86162e2766a8,46093,1733428425343 expired; onlineServers=0 2024-12-05T19:53:49,535 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '86162e2766a8,35797,1733428424518' ***** 2024-12-05T19:53:49,535 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T19:53:49,535 INFO [M:0;86162e2766a8:35797 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T19:53:49,535 INFO [M:0;86162e2766a8:35797 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T19:53:49,535 DEBUG [M:0;86162e2766a8:35797 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T19:53:49,535 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
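The shutdown ordering above is coordinated through ZooKeeper: each region server holds an ephemeral znode under /hbase/rs, closing its ZK session deletes that node, the master's ZKWatcher receives NodeDeleted/NodeChildrenChanged, and RegionServerTracker processes the expiration until onlineServers reaches 0 and the master itself stops. Purely as an illustration (this is not the RegionServerTracker implementation), watching that znode with the plain ZooKeeper client would look roughly like this, with the quorum address taken from the log:

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60123", 30_000, (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged
          && "/hbase/rs".equals(event.getPath())) {
        System.out.println("region server set changed: " + event.getPath());
      }
    });
    // Lists the live region server znodes and re-registers the default watcher (watch=true).
    List<String> liveServers = zk.getChildren("/hbase/rs", true);
    System.out.println("online servers: " + liveServers);
    zk.close();
  }
}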
2024-12-05T19:53:49,535 DEBUG [M:0;86162e2766a8:35797 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T19:53:49,535 DEBUG [master/86162e2766a8:0:becomeActiveMaster-HFileCleaner.large.0-1733428426498 {}] cleaner.HFileCleaner(306): Exit Thread[master/86162e2766a8:0:becomeActiveMaster-HFileCleaner.large.0-1733428426498,5,FailOnTimeoutGroup] 2024-12-05T19:53:49,535 DEBUG [master/86162e2766a8:0:becomeActiveMaster-HFileCleaner.small.0-1733428426500 {}] cleaner.HFileCleaner(306): Exit Thread[master/86162e2766a8:0:becomeActiveMaster-HFileCleaner.small.0-1733428426500,5,FailOnTimeoutGroup] 2024-12-05T19:53:49,536 INFO [M:0;86162e2766a8:35797 {}] hbase.ChoreService(370): Chore service for: master/86162e2766a8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T19:53:49,536 INFO [M:0;86162e2766a8:35797 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T19:53:49,536 DEBUG [M:0;86162e2766a8:35797 {}] master.HMaster(1795): Stopping service threads 2024-12-05T19:53:49,536 INFO [M:0;86162e2766a8:35797 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T19:53:49,536 INFO [M:0;86162e2766a8:35797 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T19:53:49,537 INFO [M:0;86162e2766a8:35797 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T19:53:49,537 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T19:53:49,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T19:53:49,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:49,538 DEBUG [M:0;86162e2766a8:35797 {}] zookeeper.ZKUtil(347): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T19:53:49,538 WARN [M:0;86162e2766a8:35797 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T19:53:49,539 INFO [M:0;86162e2766a8:35797 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/.lastflushedseqids 2024-12-05T19:53:49,548 WARN [M:0;86162e2766a8:35797 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:49,548 WARN [M:0;86162e2766a8:35797 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-05T19:53:49,551 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_919299418_22 at /127.0.0.1:51508 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:44813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51508 dst: /127.0.0.1:44813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:49,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-05T19:53:49,555 WARN [M:0;86162e2766a8:35797 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T19:53:49,556 INFO [M:0;86162e2766a8:35797 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T19:53:49,556 INFO [M:0;86162e2766a8:35797 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T19:53:49,556 DEBUG [M:0;86162e2766a8:35797 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T19:53:49,556 INFO [M:0;86162e2766a8:35797 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T19:53:49,556 DEBUG [M:0;86162e2766a8:35797 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T19:53:49,556 DEBUG [M:0;86162e2766a8:35797 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T19:53:49,556 DEBUG [M:0;86162e2766a8:35797 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T19:53:49,557 INFO [M:0;86162e2766a8:35797 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.81 KB heapSize=34.10 KB 2024-12-05T19:53:49,580 DEBUG [M:0;86162e2766a8:35797 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/20162f7275ac43378a41a9282ae3f4d9 is 82, key is hbase:meta,,1/info:regioninfo/1733428427404/Put/seqid=0 2024-12-05T19:53:49,582 WARN [M:0;86162e2766a8:35797 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:49,582 WARN [M:0;86162e2766a8:35797 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:49,588 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_919299418_22 at /127.0.0.1:34364 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:40207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34364 dst: /127.0.0.1:40207 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:49,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-05T19:53:49,592 WARN [M:0;86162e2766a8:35797 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
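The "Block group <1> failed to write 2 blocks. It's at high risk of losing data." warnings are survivable here because the two missing blocks are the parity blocks (indices 3 and 4 in the warnings above): with Reed-Solomon RS(3,2), any 3 of the 5 blocks in a group reconstruct the data, so a group holding only its 3 data blocks is still readable but has no redundancy left. A back-of-the-envelope illustration of that arithmetic (not code from the test):

public class StripeSurvivalSketch {
  public static void main(String[] args) {
    int dataUnits = 3, parityUnits = 2;          // RS-3-2-1024k
    int blocksWritten = 3;                       // parity indices 3 and 4 failed above
    int missing = dataUnits + parityUnits - blocksWritten;
    boolean readable = missing <= parityUnits;   // any 3 of the 5 blocks suffice
    int remainingFaultTolerance = parityUnits - missing;
    System.out.printf("readable=%b, further block losses tolerated=%d%n",
        readable, remainingFaultTolerance);     // readable=true, 0 further losses tolerated
  }
}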
2024-12-05T19:53:49,592 INFO [M:0;86162e2766a8:35797 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/20162f7275ac43378a41a9282ae3f4d9 2024-12-05T19:53:49,618 DEBUG [M:0;86162e2766a8:35797 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a17e2c16c9a24bd4ab0b02a55c03dd22 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733428428274/Put/seqid=0 2024-12-05T19:53:49,620 WARN [M:0;86162e2766a8:35797 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:49,620 WARN [M:0;86162e2766a8:35797 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:49,625 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_919299418_22 at /127.0.0.1:35088 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:37605:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35088 dst: /127.0.0.1:37605 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:49,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_-9223372036854775552_1037 (size=6437) 2024-12-05T19:53:49,629 WARN [M:0;86162e2766a8:35797 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T19:53:49,630 INFO [M:0;86162e2766a8:35797 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.12 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a17e2c16c9a24bd4ab0b02a55c03dd22 2024-12-05T19:53:49,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:49,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x10063be64fe0002, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:49,633 INFO [RS:1;86162e2766a8:46093 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T19:53:49,633 INFO [RS:1;86162e2766a8:46093 {}] regionserver.HRegionServer(1031): Exiting; stopping=86162e2766a8,46093,1733428425343; zookeeper connection closed. 2024-12-05T19:53:49,634 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6b64951c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6b64951c 2024-12-05T19:53:49,634 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-05T19:53:49,655 DEBUG [M:0;86162e2766a8:35797 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a5188b8bfddf4c68abebb7f99250c0f3 is 69, key is 86162e2766a8,35535,1733428425409/rs:state/1733428426573/Put/seqid=0 2024-12-05T19:53:49,657 WARN [M:0;86162e2766a8:35797 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:49,657 WARN [M:0;86162e2766a8:35797 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T19:53:49,659 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_919299418_22 at /127.0.0.1:51524 [Receiving block BP-1009711422-172.17.0.2-1733428421158:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:44813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51524 dst: /127.0.0.1:44813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T19:53:49,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-05T19:53:49,664 WARN [M:0;86162e2766a8:35797 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T19:53:49,664 INFO [M:0;86162e2766a8:35797 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a5188b8bfddf4c68abebb7f99250c0f3 2024-12-05T19:53:49,674 DEBUG [M:0;86162e2766a8:35797 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/20162f7275ac43378a41a9282ae3f4d9 as hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/20162f7275ac43378a41a9282ae3f4d9 2024-12-05T19:53:49,683 INFO [M:0;86162e2766a8:35797 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/20162f7275ac43378a41a9282ae3f4d9, entries=8, sequenceid=72, filesize=5.5 K 2024-12-05T19:53:49,685 DEBUG [M:0;86162e2766a8:35797 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a17e2c16c9a24bd4ab0b02a55c03dd22 as hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a17e2c16c9a24bd4ab0b02a55c03dd22 2024-12-05T19:53:49,694 INFO [M:0;86162e2766a8:35797 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a17e2c16c9a24bd4ab0b02a55c03dd22, entries=8, sequenceid=72, filesize=6.3 K 2024-12-05T19:53:49,695 DEBUG [M:0;86162e2766a8:35797 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a5188b8bfddf4c68abebb7f99250c0f3 as hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a5188b8bfddf4c68abebb7f99250c0f3 2024-12-05T19:53:49,703 INFO [M:0;86162e2766a8:35797 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a5188b8bfddf4c68abebb7f99250c0f3, entries=3, sequenceid=72, filesize=5.2 K 2024-12-05T19:53:49,704 INFO [M:0;86162e2766a8:35797 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=72, compaction requested=false 2024-12-05T19:53:49,706 INFO [M:0;86162e2766a8:35797 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T19:53:49,706 DEBUG [M:0;86162e2766a8:35797 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733428429556Disabling compacts and flushes for region at 1733428429556Disabling writes for close at 1733428429556Obtaining lock to block concurrent updates at 1733428429557 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733428429557Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27450, getHeapSize=34856, getOffHeapSize=0, getCellsCount=85 at 1733428429557Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733428429558 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733428429558Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733428429579 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733428429579Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733428429600 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733428429617 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733428429617Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733428429638 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733428429654 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733428429654Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7462f76: reopening flushed file at 1733428429672 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b7f9c74: reopening flushed file at 1733428429683 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3067fea4: reopening flushed file at 1733428429694 (+11 ms)Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=72, compaction requested=false at 1733428429704 (+10 ms)Writing region close event to WAL at 1733428429706 (+2 ms)Closed at 1733428429706 2024-12-05T19:53:49,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741825_1011 (size=32653) 2024-12-05T19:53:49,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37605 is added to blk_1073741825_1011 (size=32653) 2024-12-05T19:53:49,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741825_1011 (size=32653) 2024-12-05T19:53:49,710 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T19:53:49,711 INFO [M:0;86162e2766a8:35797 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-05T19:53:49,711 INFO [M:0;86162e2766a8:35797 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35797 2024-12-05T19:53:49,711 INFO [M:0;86162e2766a8:35797 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T19:53:49,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:49,813 INFO [M:0;86162e2766a8:35797 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T19:53:49,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35797-0x10063be64fe0000, quorum=127.0.0.1:60123, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:49,818 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T19:53:49,820 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T19:53:49,820 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T19:53:49,820 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T19:53:49,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/hadoop.log.dir/,STOPPED} 2024-12-05T19:53:49,824 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T19:53:49,824 WARN [BP-1009711422-172.17.0.2-1733428421158 heartbeating to localhost/127.0.0.1:46315 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T19:53:49,824 WARN [BP-1009711422-172.17.0.2-1733428421158 heartbeating to localhost/127.0.0.1:46315 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1009711422-172.17.0.2-1733428421158 (Datanode Uuid c958940b-4ae7-462d-8566-abb6a91d2b6c) service to localhost/127.0.0.1:46315 2024-12-05T19:53:49,824 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T19:53:49,825 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/data/data5/current/BP-1009711422-172.17.0.2-1733428421158 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T19:53:49,825 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/data/data6/current/BP-1009711422-172.17.0.2-1733428421158 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T19:53:49,825 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T19:53:49,827 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T19:53:49,828 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T19:53:49,828 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T19:53:49,828 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T19:53:49,828 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/hadoop.log.dir/,STOPPED} 2024-12-05T19:53:49,830 WARN [BP-1009711422-172.17.0.2-1733428421158 heartbeating to localhost/127.0.0.1:46315 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T19:53:49,830 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T19:53:49,830 WARN [BP-1009711422-172.17.0.2-1733428421158 heartbeating to localhost/127.0.0.1:46315 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1009711422-172.17.0.2-1733428421158 (Datanode Uuid 0510c327-bcf0-4f71-ae18-e8c5ddacdcf9) service to localhost/127.0.0.1:46315 2024-12-05T19:53:49,830 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T19:53:49,830 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/data/data3/current/BP-1009711422-172.17.0.2-1733428421158 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T19:53:49,831 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/data/data4/current/BP-1009711422-172.17.0.2-1733428421158 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T19:53:49,831 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T19:53:49,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T19:53:49,838 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T19:53:49,838 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T19:53:49,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T19:53:49,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/hadoop.log.dir/,STOPPED} 2024-12-05T19:53:49,840 WARN [BP-1009711422-172.17.0.2-1733428421158 heartbeating to localhost/127.0.0.1:46315 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T19:53:49,840 WARN [BP-1009711422-172.17.0.2-1733428421158 heartbeating to localhost/127.0.0.1:46315 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1009711422-172.17.0.2-1733428421158 (Datanode Uuid 5cbbda21-0fc6-4b59-9453-b5d8c0d6c385) service to localhost/127.0.0.1:46315 2024-12-05T19:53:49,840 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T19:53:49,840 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T19:53:49,840 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/data/data1/current/BP-1009711422-172.17.0.2-1733428421158 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T19:53:49,841 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/cluster_1622064e-d3e6-0ddc-f64d-1a89f33fe697/data/data2/current/BP-1009711422-172.17.0.2-1733428421158 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T19:53:49,841 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T19:53:49,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T19:53:49,851 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T19:53:49,851 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T19:53:49,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T19:53:49,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/hadoop.log.dir/,STOPPED} 2024-12-05T19:53:49,863 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-05T19:53:49,893 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-05T19:53:49,901 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=88 (was 158), OpenFileDescriptor=441 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=287 (was 284) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=8614 (was 8903) 2024-12-05T19:53:49,907 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=88, OpenFileDescriptor=441, MaxFileDescriptor=1048576, SystemLoadAverage=287, ProcessCount=11, AvailableMemoryMB=8614 2024-12-05T19:53:49,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T19:53:49,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/hadoop.log.dir so I do NOT create it in target/test-data/925e084a-3afc-12ce-5cee-f895408b7226 2024-12-05T19:53:49,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afae0248-3f86-1e7b-058f-943a1d58cb39/hadoop.tmp.dir so I do NOT create it in target/test-data/925e084a-3afc-12ce-5cee-f895408b7226 2024-12-05T19:53:49,908 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11, deleteOnExit=true 2024-12-05T19:53:49,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-05T19:53:49,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/test.cache.data in system properties and HBase conf 2024-12-05T19:53:49,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T19:53:49,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/hadoop.log.dir in system properties and HBase conf 2024-12-05T19:53:49,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T19:53:49,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T19:53:49,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T19:53:49,909 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-05T19:53:49,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T19:53:49,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T19:53:49,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T19:53:49,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T19:53:49,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T19:53:49,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T19:53:49,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T19:53:49,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T19:53:49,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T19:53:49,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/nfs.dump.dir in system properties and HBase conf 2024-12-05T19:53:49,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/java.io.tmpdir in system properties and HBase conf 2024-12-05T19:53:49,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T19:53:49,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T19:53:49,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T19:53:49,997 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T19:53:50,003 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T19:53:50,004 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T19:53:50,004 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T19:53:50,005 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T19:53:50,005 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T19:53:50,006 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ad8d9de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/hadoop.log.dir/,AVAILABLE} 2024-12-05T19:53:50,006 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e58533{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T19:53:50,123 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@15027254{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/java.io.tmpdir/jetty-localhost-45605-hadoop-hdfs-3_4_1-tests_jar-_-any-3958944219302986910/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T19:53:50,124 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4293887f{HTTP/1.1, (http/1.1)}{localhost:45605} 2024-12-05T19:53:50,124 INFO [Time-limited test {}] server.Server(415): Started @11101ms 2024-12-05T19:53:50,216 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T19:53:50,221 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T19:53:50,223 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T19:53:50,223 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T19:53:50,223 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T19:53:50,224 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c4ebd49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/hadoop.log.dir/,AVAILABLE} 2024-12-05T19:53:50,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72f96008{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T19:53:50,343 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18f854cf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/java.io.tmpdir/jetty-localhost-33199-hadoop-hdfs-3_4_1-tests_jar-_-any-17386538050135257187/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T19:53:50,344 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@70fdfe33{HTTP/1.1, (http/1.1)}{localhost:33199} 2024-12-05T19:53:50,344 INFO [Time-limited test {}] server.Server(415): Started @11322ms 2024-12-05T19:53:50,346 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T19:53:50,386 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T19:53:50,389 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T19:53:50,391 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T19:53:50,391 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T19:53:50,391 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T19:53:50,392 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b4148d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/hadoop.log.dir/,AVAILABLE} 2024-12-05T19:53:50,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55cf3a01{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T19:53:50,440 WARN [Thread-524 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/data/data1/current/BP-549980394-172.17.0.2-1733428429944/current, will proceed with Du for space computation calculation, 2024-12-05T19:53:50,441 WARN [Thread-525 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/data/data2/current/BP-549980394-172.17.0.2-1733428429944/current, will proceed with Du for space computation calculation, 2024-12-05T19:53:50,461 WARN [Thread-503 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T19:53:50,464 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa685f069ccd2320e with lease ID 0x3732b2c7290ecda2: Processing first storage report for DS-50311131-3490-44a5-bc84-2bc82ff40847 from datanode DatanodeRegistration(127.0.0.1:44597, datanodeUuid=f5d09ef1-9248-4d9c-8501-7c71e629f496, infoPort=35159, infoSecurePort=0, ipcPort=33807, storageInfo=lv=-57;cid=testClusterID;nsid=1187745291;c=1733428429944) 2024-12-05T19:53:50,464 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa685f069ccd2320e with lease ID 0x3732b2c7290ecda2: from storage DS-50311131-3490-44a5-bc84-2bc82ff40847 node DatanodeRegistration(127.0.0.1:44597, datanodeUuid=f5d09ef1-9248-4d9c-8501-7c71e629f496, infoPort=35159, infoSecurePort=0, ipcPort=33807, storageInfo=lv=-57;cid=testClusterID;nsid=1187745291;c=1733428429944), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T19:53:50,464 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa685f069ccd2320e with lease ID 0x3732b2c7290ecda2: Processing first storage report for DS-6143992d-f8e6-48bf-8ec1-8bd68f0d3cd6 from datanode DatanodeRegistration(127.0.0.1:44597, datanodeUuid=f5d09ef1-9248-4d9c-8501-7c71e629f496, infoPort=35159, infoSecurePort=0, ipcPort=33807, storageInfo=lv=-57;cid=testClusterID;nsid=1187745291;c=1733428429944) 2024-12-05T19:53:50,464 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa685f069ccd2320e with lease ID 0x3732b2c7290ecda2: from storage DS-6143992d-f8e6-48bf-8ec1-8bd68f0d3cd6 node DatanodeRegistration(127.0.0.1:44597, datanodeUuid=f5d09ef1-9248-4d9c-8501-7c71e629f496, infoPort=35159, infoSecurePort=0, ipcPort=33807, storageInfo=lv=-57;cid=testClusterID;nsid=1187745291;c=1733428429944), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T19:53:50,517 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@46f4cd0a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/java.io.tmpdir/jetty-localhost-38233-hadoop-hdfs-3_4_1-tests_jar-_-any-5093767494556003691/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T19:53:50,517 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@700b2317{HTTP/1.1, (http/1.1)}{localhost:38233} 2024-12-05T19:53:50,517 INFO [Time-limited test {}] server.Server(415): Started @11495ms 2024-12-05T19:53:50,519 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T19:53:50,554 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T19:53:50,558 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T19:53:50,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T19:53:50,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T19:53:50,559 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T19:53:50,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44968fad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/hadoop.log.dir/,AVAILABLE} 2024-12-05T19:53:50,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40b03519{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T19:53:50,605 WARN [Thread-560 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/data/data4/current/BP-549980394-172.17.0.2-1733428429944/current, will proceed with Du for space computation calculation, 2024-12-05T19:53:50,605 WARN [Thread-559 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/data/data3/current/BP-549980394-172.17.0.2-1733428429944/current, will proceed with Du for space computation calculation, 2024-12-05T19:53:50,623 WARN [Thread-539 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T19:53:50,630 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1549dab8eeb58a8 with lease ID 0x3732b2c7290ecda3: Processing first storage report for DS-cc693479-c383-40ea-9e4d-833ad50f261f from datanode DatanodeRegistration(127.0.0.1:44737, datanodeUuid=f4242a26-5320-42bc-9770-968079fa456b, infoPort=41515, infoSecurePort=0, ipcPort=41109, storageInfo=lv=-57;cid=testClusterID;nsid=1187745291;c=1733428429944) 2024-12-05T19:53:50,630 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1549dab8eeb58a8 with lease ID 0x3732b2c7290ecda3: from storage DS-cc693479-c383-40ea-9e4d-833ad50f261f node DatanodeRegistration(127.0.0.1:44737, datanodeUuid=f4242a26-5320-42bc-9770-968079fa456b, infoPort=41515, infoSecurePort=0, ipcPort=41109, storageInfo=lv=-57;cid=testClusterID;nsid=1187745291;c=1733428429944), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T19:53:50,630 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1549dab8eeb58a8 with lease ID 0x3732b2c7290ecda3: Processing first storage report for DS-53d76e37-642e-419d-8859-5c7beb3577b1 from datanode DatanodeRegistration(127.0.0.1:44737, datanodeUuid=f4242a26-5320-42bc-9770-968079fa456b, infoPort=41515, infoSecurePort=0, ipcPort=41109, storageInfo=lv=-57;cid=testClusterID;nsid=1187745291;c=1733428429944) 2024-12-05T19:53:50,630 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1549dab8eeb58a8 with lease ID 0x3732b2c7290ecda3: from storage DS-53d76e37-642e-419d-8859-5c7beb3577b1 node DatanodeRegistration(127.0.0.1:44737, datanodeUuid=f4242a26-5320-42bc-9770-968079fa456b, infoPort=41515, infoSecurePort=0, ipcPort=41109, storageInfo=lv=-57;cid=testClusterID;nsid=1187745291;c=1733428429944), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T19:53:50,679 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f50f857{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/java.io.tmpdir/jetty-localhost-39787-hadoop-hdfs-3_4_1-tests_jar-_-any-13152312902507421009/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T19:53:50,679 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7eeef71e{HTTP/1.1, (http/1.1)}{localhost:39787} 2024-12-05T19:53:50,679 INFO [Time-limited test {}] server.Server(415): Started @11657ms 2024-12-05T19:53:50,682 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
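Editor's note: at this point testReadWrite[1] has brought up a fresh MiniDFSCluster with three DataNodes (the three Jetty "datanode" web apps above), matching the StartMiniClusterOption printed earlier. A hedged sketch of the JUnit lifecycle such a test typically uses; HBaseTestingUtil and StartMiniClusterOption are named in the log, but the builder and start/stop method names below are assumptions that may differ between HBase versions.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycleSketch {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        // Mirrors the option printed in the log: 1 master, 3 region servers,
        // 3 DataNodes, 1 ZooKeeper server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)
            .numZkServers(1)
            .build();
        UTIL.startMiniCluster(option);
      }

      @AfterClass
      public static void tearDown() throws Exception {
        // Produces the interrupt/shutdown warnings seen earlier in this log.
        UTIL.shutdownMiniCluster();
      }
    }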
2024-12-05T19:53:50,781 WARN [Thread-586 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/data/data6/current/BP-549980394-172.17.0.2-1733428429944/current, will proceed with Du for space computation calculation, 2024-12-05T19:53:50,781 WARN [Thread-585 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/data/data5/current/BP-549980394-172.17.0.2-1733428429944/current, will proceed with Du for space computation calculation, 2024-12-05T19:53:50,806 WARN [Thread-574 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T19:53:50,809 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x52fb1b4bedcd9720 with lease ID 0x3732b2c7290ecda4: Processing first storage report for DS-4d1b3946-a234-47fa-b5b4-c994777f8a2a from datanode DatanodeRegistration(127.0.0.1:38381, datanodeUuid=faccaf56-b3b3-4f38-b0da-b058fe16cdd2, infoPort=40321, infoSecurePort=0, ipcPort=36107, storageInfo=lv=-57;cid=testClusterID;nsid=1187745291;c=1733428429944) 2024-12-05T19:53:50,809 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x52fb1b4bedcd9720 with lease ID 0x3732b2c7290ecda4: from storage DS-4d1b3946-a234-47fa-b5b4-c994777f8a2a node DatanodeRegistration(127.0.0.1:38381, datanodeUuid=faccaf56-b3b3-4f38-b0da-b058fe16cdd2, infoPort=40321, infoSecurePort=0, ipcPort=36107, storageInfo=lv=-57;cid=testClusterID;nsid=1187745291;c=1733428429944), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T19:53:50,809 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x52fb1b4bedcd9720 with lease ID 0x3732b2c7290ecda4: Processing first storage report for DS-f64f1533-2bf7-4d96-a5f7-d5d4b5f0c006 from datanode DatanodeRegistration(127.0.0.1:38381, datanodeUuid=faccaf56-b3b3-4f38-b0da-b058fe16cdd2, infoPort=40321, infoSecurePort=0, ipcPort=36107, storageInfo=lv=-57;cid=testClusterID;nsid=1187745291;c=1733428429944) 2024-12-05T19:53:50,809 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x52fb1b4bedcd9720 with lease ID 0x3732b2c7290ecda4: from storage DS-f64f1533-2bf7-4d96-a5f7-d5d4b5f0c006 node DatanodeRegistration(127.0.0.1:38381, datanodeUuid=faccaf56-b3b3-4f38-b0da-b058fe16cdd2, infoPort=40321, infoSecurePort=0, ipcPort=36107, storageInfo=lv=-57;cid=testClusterID;nsid=1187745291;c=1733428429944), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T19:53:50,912 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226 2024-12-05T19:53:50,915 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/zookeeper_0, clientPort=64776, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T19:53:50,916 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64776 2024-12-05T19:53:50,916 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:50,918 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:50,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741825_1001 (size=7) 2024-12-05T19:53:50,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741825_1001 (size=7) 2024-12-05T19:53:50,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741825_1001 (size=7) 2024-12-05T19:53:50,933 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527 with version=8 2024-12-05T19:53:50,933 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:46315/user/jenkins/test-data/1a6465ef-adda-b6b8-36b8-07863dede557/hbase-staging 2024-12-05T19:53:50,935 INFO [Time-limited test {}] client.ConnectionUtils(128): master/86162e2766a8:0 server-side Connection retries=45 2024-12-05T19:53:50,935 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:50,935 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:50,936 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T19:53:50,936 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:50,936 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T19:53:50,936 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T19:53:50,936 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T19:53:50,937 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46269 2024-12-05T19:53:50,938 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46269 connecting to ZooKeeper ensemble=127.0.0.1:64776 2024-12-05T19:53:50,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:462690x0, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T19:53:50,944 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46269-0x10063be81290000 connected 2024-12-05T19:53:50,958 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:50,959 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:50,961 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:50,961 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527, hbase.cluster.distributed=false 2024-12-05T19:53:50,963 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T19:53:50,963 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46269 2024-12-05T19:53:50,963 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46269 2024-12-05T19:53:50,964 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46269 2024-12-05T19:53:50,966 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46269 2024-12-05T19:53:50,966 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46269 2024-12-05T19:53:50,982 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/86162e2766a8:0 server-side Connection retries=45 2024-12-05T19:53:50,982 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:50,982 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:50,983 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T19:53:50,983 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:50,983 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T19:53:50,983 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T19:53:50,983 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T19:53:50,984 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40645 2024-12-05T19:53:50,985 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40645 connecting to ZooKeeper ensemble=127.0.0.1:64776 2024-12-05T19:53:50,986 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:50,988 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:50,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:406450x0, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T19:53:50,993 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40645-0x10063be81290001 connected 2024-12-05T19:53:50,993 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:50,993 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T19:53:50,994 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T19:53:50,995 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T19:53:50,996 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T19:53:50,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40645 2024-12-05T19:53:50,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40645 2024-12-05T19:53:50,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40645 2024-12-05T19:53:50,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40645 2024-12-05T19:53:50,997 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40645 2024-12-05T19:53:51,012 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/86162e2766a8:0 server-side Connection retries=45 2024-12-05T19:53:51,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:51,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:51,013 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T19:53:51,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:51,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T19:53:51,013 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T19:53:51,013 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T19:53:51,014 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40097 2024-12-05T19:53:51,015 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40097 connecting to ZooKeeper ensemble=127.0.0.1:64776 2024-12-05T19:53:51,016 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:51,018 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:51,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:400970x0, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T19:53:51,022 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40097-0x10063be81290002 connected 2024-12-05T19:53:51,023 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:51,023 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T19:53:51,023 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T19:53:51,024 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
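Editor's note: the master and region servers above each connect to the mini ZooKeeper ensemble on 127.0.0.1:64776 and log "Set watcher on znode that does not yet exist" for paths like /hbase/master. An illustrative sketch of that pattern using the plain ZooKeeper client (not HBase's ZKUtil): an exists() call both checks for the node and registers a one-shot watch that fires once the node is created.

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // 127.0.0.1:64776 is the mini ZK client port from this log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64776", 30000,
            event -> System.out.println("ZK event: " + event.getType() + " on " + event.getPath()));

        // Returns null if /hbase/master does not exist yet, but still leaves a
        // watch behind, so a NodeCreated event arrives when the active master
        // later registers itself.
        Stat stat = zk.exists("/hbase/master", true);
        System.out.println("/hbase/master currently " + (stat == null ? "absent" : "present"));

        Thread.sleep(5000); // wait briefly for a possible NodeCreated event
        zk.close();
      }
    }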
2024-12-05T19:53:51,025 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T19:53:51,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40097 2024-12-05T19:53:51,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40097 2024-12-05T19:53:51,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40097 2024-12-05T19:53:51,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40097 2024-12-05T19:53:51,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40097 2024-12-05T19:53:51,042 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/86162e2766a8:0 server-side Connection retries=45 2024-12-05T19:53:51,043 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:51,043 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:51,043 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T19:53:51,043 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T19:53:51,043 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T19:53:51,043 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T19:53:51,043 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T19:53:51,044 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39485 2024-12-05T19:53:51,045 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39485 connecting to ZooKeeper ensemble=127.0.0.1:64776 2024-12-05T19:53:51,046 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:51,047 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:51,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:394850x0, quorum=127.0.0.1:64776, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T19:53:51,052 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:51,052 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39485-0x10063be81290003 connected 2024-12-05T19:53:51,052 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T19:53:51,054 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T19:53:51,054 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T19:53:51,055 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T19:53:51,056 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39485 2024-12-05T19:53:51,056 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39485 2024-12-05T19:53:51,056 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39485 2024-12-05T19:53:51,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39485 2024-12-05T19:53:51,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39485 2024-12-05T19:53:51,068 DEBUG [M:0;86162e2766a8:46269 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;86162e2766a8:46269 2024-12-05T19:53:51,069 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/86162e2766a8,46269,1733428430935 2024-12-05T19:53:51,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:51,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:51,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:51,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:51,072 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46269-0x10063be81290000, 
quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/86162e2766a8,46269,1733428430935 2024-12-05T19:53:51,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T19:53:51,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T19:53:51,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T19:53:51,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,077 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T19:53:51,078 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/86162e2766a8,46269,1733428430935 from backup master directory 2024-12-05T19:53:51,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:51,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:51,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/86162e2766a8,46269,1733428430935 2024-12-05T19:53:51,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:51,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, 
quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T19:53:51,080 WARN [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T19:53:51,080 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=86162e2766a8,46269,1733428430935 2024-12-05T19:53:51,087 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/hbase.id] with ID: 62ccd1a9-18a9-4ac6-802c-88ff2711da26 2024-12-05T19:53:51,087 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/.tmp/hbase.id 2024-12-05T19:53:51,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741826_1002 (size=42) 2024-12-05T19:53:51,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741826_1002 (size=42) 2024-12-05T19:53:51,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741826_1002 (size=42) 2024-12-05T19:53:51,097 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/.tmp/hbase.id]:[hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/hbase.id] 2024-12-05T19:53:51,114 INFO [master/86162e2766a8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T19:53:51,114 INFO [master/86162e2766a8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T19:53:51,116 INFO [master/86162e2766a8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
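Editor's note: the active master above writes the cluster ID to a temporary location (.tmp/hbase.id) and then moves it into place, so readers never observe a partially written file. An illustrative sketch of that write-then-rename pattern with the public Hadoop FileSystem API (not HBase's FSUtils); the paths are simplified placeholders, while the NameNode port and cluster ID value are taken from the log.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:42213"); // NameNode port from the log
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id"); // placeholder path
        Path dst = new Path("/user/jenkins/test-data/hbase.id");      // placeholder path

        // Write the ID to the temporary file first.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("62ccd1a9-18a9-4ac6-802c-88ff2711da26".getBytes(StandardCharsets.UTF_8));
        }
        // The rename within a single HDFS namespace publishes the file in one step.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("could not move " + tmp + " to " + dst);
        }
      }
    }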
2024-12-05T19:53:51,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741827_1003 (size=196) 2024-12-05T19:53:51,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741827_1003 (size=196) 2024-12-05T19:53:51,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741827_1003 (size=196) 2024-12-05T19:53:51,131 INFO [master/86162e2766a8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T19:53:51,133 INFO [master/86162e2766a8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T19:53:51,133 INFO [master/86162e2766a8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T19:53:51,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is 
added to blk_1073741828_1004 (size=1189) 2024-12-05T19:53:51,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741828_1004 (size=1189) 2024-12-05T19:53:51,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741828_1004 (size=1189) 2024-12-05T19:53:51,146 INFO [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store 2024-12-05T19:53:51,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741829_1005 (size=34) 2024-12-05T19:53:51,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741829_1005 (size=34) 2024-12-05T19:53:51,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741829_1005 (size=34) 2024-12-05T19:53:51,156 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T19:53:51,156 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T19:53:51,156 INFO [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T19:53:51,156 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
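The master:store descriptor logged above lists four column families (info, proc, rs, state) with per-family settings such as VERSIONS, IN_MEMORY, DATA_BLOCK_ENCODING, BLOOMFILTER and BLOCKSIZE. As a rough illustration of how settings like those map onto the public HBase client API, here is a sketch; it is not the internal MasterRegion code, and only two of the four families are spelled out.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  static TableDescriptor masterStoreLike() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        // 'info' as logged: 3 versions, in-memory, ROW_INDEX_V1 encoding,
        // ROWCOL bloom filter, 8 KB blocks.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc' as logged: 1 version, ROW bloom, 64 KB blocks ('rs' and 'state' look the same).
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build())
        .build();
  }
}
```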
2024-12-05T19:53:51,156 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T19:53:51,156 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T19:53:51,156 INFO [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T19:53:51,156 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733428431156Disabling compacts and flushes for region at 1733428431156Disabling writes for close at 1733428431156Writing region close event to WAL at 1733428431156Closed at 1733428431156 2024-12-05T19:53:51,157 WARN [master/86162e2766a8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/.initializing 2024-12-05T19:53:51,158 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/WALs/86162e2766a8,46269,1733428430935 2024-12-05T19:53:51,161 INFO [master/86162e2766a8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=86162e2766a8%2C46269%2C1733428430935, suffix=, logDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/WALs/86162e2766a8,46269,1733428430935, archiveDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/oldWALs, maxLogs=10 2024-12-05T19:53:51,162 INFO [master/86162e2766a8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 86162e2766a8%2C46269%2C1733428430935.1733428431162 2024-12-05T19:53:51,171 INFO [master/86162e2766a8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/WALs/86162e2766a8,46269,1733428430935/86162e2766a8%2C46269%2C1733428430935.1733428431162 2024-12-05T19:53:51,173 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40321:40321),(127.0.0.1/127.0.0.1:35159:35159),(127.0.0.1/127.0.0.1:41515:41515)] 2024-12-05T19:53:51,175 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T19:53:51,175 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T19:53:51,175 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:51,175 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:51,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:51,179 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T19:53:51,179 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:51,180 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:51,180 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:51,181 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T19:53:51,181 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:51,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T19:53:51,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:51,184 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T19:53:51,185 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:51,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T19:53:51,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:51,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T19:53:51,187 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:51,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T19:53:51,188 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:51,188 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:51,189 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:51,190 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:51,191 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:51,191 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T19:53:51,193 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T19:53:51,196 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T19:53:51,196 INFO [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61902871, jitterRate=-0.07757534086704254}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T19:53:51,197 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733428431175Initializing all the Stores at 1733428431177 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428431177Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733428431177Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733428431177Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733428431177Cleaning up temporary data from old regions at 1733428431191 (+14 ms)Region opened successfully at 1733428431197 (+6 ms) 2024-12-05T19:53:51,197 INFO [master/86162e2766a8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T19:53:51,201 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f969784, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=86162e2766a8/172.17.0.2:0 2024-12-05T19:53:51,202 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T19:53:51,203 INFO [master/86162e2766a8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T19:53:51,203 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T19:53:51,203 INFO [master/86162e2766a8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T19:53:51,203 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-05T19:53:51,204 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-05T19:53:51,204 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T19:53:51,206 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
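The recurring "Received ZooKeeper Event ... Set watcher on existing znode" entries throughout this section come from one-shot ZooKeeper watches that get re-registered after every notification. A standalone sketch of that pattern with the plain ZooKeeper client follows; it is not HBase's ZKWatcher/ZKUtil code, and only the quorum address is taken from the log.

```java
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64776", 30_000, event -> { });
    watchZnode(zk, "/hbase/master");
    Thread.sleep(60_000);   // keep the session open long enough to observe events
    zk.close();
  }

  // ZooKeeper watches fire once; re-arm after every event, as the log shows HBase doing.
  static void watchZnode(ZooKeeper zk, String path) throws KeeperException, InterruptedException {
    Watcher watcher = new Watcher() {
      @Override public void process(WatchedEvent event) {
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", path=" + event.getPath());
        try {
          watchZnode(zk, path);   // re-register the one-shot watch
        } catch (Exception e) {
          e.printStackTrace();
        }
      }
    };
    // exists() arms the watch whether or not the znode is present yet.
    zk.exists(path, watcher);
  }
}
```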
2024-12-05T19:53:51,207 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T19:53:51,209 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T19:53:51,209 INFO [master/86162e2766a8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T19:53:51,210 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T19:53:51,211 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T19:53:51,211 INFO [master/86162e2766a8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T19:53:51,212 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T19:53:51,213 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T19:53:51,214 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T19:53:51,215 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T19:53:51,218 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T19:53:51,219 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T19:53:51,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:51,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:51,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:51,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-05T19:53:51,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:51,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,221 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=86162e2766a8,46269,1733428430935, sessionid=0x10063be81290000, setting cluster-up flag (Was=false) 2024-12-05T19:53:51,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,229 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T19:53:51,231 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=86162e2766a8,46269,1733428430935 2024-12-05T19:53:51,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,239 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T19:53:51,240 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=86162e2766a8,46269,1733428430935 2024-12-05T19:53:51,241 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T19:53:51,244 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T19:53:51,245 INFO [master/86162e2766a8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T19:53:51,245 INFO [master/86162e2766a8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
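The znode probes above (/hbase/balancer, /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge) read switch state that operators normally flip through the Admin API rather than by touching ZooKeeper directly. A hedged sketch of querying and toggling the balancer from a client; the method names are from the HBase 2.x+ public Admin interface as I recall it, so treat them as assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BalancerSwitchSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");   // quorum host from the log; client port elided
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      boolean enabled = admin.isBalancerEnabled();     // state backed by the /hbase/balancer znode
      System.out.println("balancer enabled: " + enabled);
      // Turn the balancer off and back on; the second argument asks for a synchronous switch.
      admin.balancerSwitch(false, true);
      admin.balancerSwitch(true, true);
    }
  }
}
```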
2024-12-05T19:53:51,245 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 86162e2766a8,46269,1733428430935 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T19:53:51,247 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/86162e2766a8:0, corePoolSize=5, maxPoolSize=5 2024-12-05T19:53:51,247 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/86162e2766a8:0, corePoolSize=5, maxPoolSize=5 2024-12-05T19:53:51,247 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/86162e2766a8:0, corePoolSize=5, maxPoolSize=5 2024-12-05T19:53:51,247 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/86162e2766a8:0, corePoolSize=5, maxPoolSize=5 2024-12-05T19:53:51,247 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/86162e2766a8:0, corePoolSize=10, maxPoolSize=10 2024-12-05T19:53:51,247 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,247 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/86162e2766a8:0, corePoolSize=2, maxPoolSize=2 2024-12-05T19:53:51,247 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,249 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T19:53:51,250 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T19:53:51,251 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:51,251 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T19:53:51,257 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733428461257 2024-12-05T19:53:51,257 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T19:53:51,257 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T19:53:51,257 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T19:53:51,257 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T19:53:51,257 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T19:53:51,258 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T19:53:51,258 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
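The cluster status dump a few entries back (0 live region servers, 0 regions, average load 0.0) is essentially the same information a client can read through the Admin API once the master is serving. A small sketch, assuming the HBase 2.x ClusterMetrics getters; any name not visible in the log is an assumption on my part.

```java
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("master:              " + metrics.getMasterName());
      System.out.println("backup masters:      " + metrics.getBackupMasterNames().size());
      System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
      System.out.println("dead region servers: " + metrics.getDeadServerNames().size());
    }
  }
}
```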
2024-12-05T19:53:51,259 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T19:53:51,259 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T19:53:51,259 INFO [RS:0;86162e2766a8:40645 {}] regionserver.HRegionServer(746): ClusterId : 62ccd1a9-18a9-4ac6-802c-88ff2711da26 2024-12-05T19:53:51,259 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T19:53:51,259 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T19:53:51,260 INFO [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(746): ClusterId : 62ccd1a9-18a9-4ac6-802c-88ff2711da26 2024-12-05T19:53:51,260 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T19:53:51,260 INFO [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(746): ClusterId : 62ccd1a9-18a9-4ac6-802c-88ff2711da26 2024-12-05T19:53:51,260 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T19:53:51,262 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T19:53:51,262 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T19:53:51,262 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T19:53:51,263 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T19:53:51,263 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T19:53:51,263 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T19:53:51,265 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T19:53:51,265 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T19:53:51,265 DEBUG [RS:0;86162e2766a8:40645 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f81d0a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=86162e2766a8/172.17.0.2:0 2024-12-05T19:53:51,266 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T19:53:51,266 DEBUG [RS:1;86162e2766a8:40097 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bca3db4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=86162e2766a8/172.17.0.2:0 2024-12-05T19:53:51,266 DEBUG [RS:2;86162e2766a8:39485 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@acf5b10, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=86162e2766a8/172.17.0.2:0 2024-12-05T19:53:51,268 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T19:53:51,268 INFO [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T19:53:51,269 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/86162e2766a8:0:becomeActiveMaster-HFileCleaner.large.0-1733428431268,5,FailOnTimeoutGroup] 2024-12-05T19:53:51,275 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/86162e2766a8:0:becomeActiveMaster-HFileCleaner.small.0-1733428431270,5,FailOnTimeoutGroup] 2024-12-05T19:53:51,275 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,275 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T19:53:51,275 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,275 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,281 DEBUG [RS:1;86162e2766a8:40097 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;86162e2766a8:40097 2024-12-05T19:53:51,281 INFO [RS:1;86162e2766a8:40097 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T19:53:51,281 INFO [RS:1;86162e2766a8:40097 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T19:53:51,281 DEBUG [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T19:53:51,282 DEBUG [RS:0;86162e2766a8:40645 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;86162e2766a8:40645 2024-12-05T19:53:51,282 INFO [RS:0;86162e2766a8:40645 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T19:53:51,282 INFO [RS:0;86162e2766a8:40645 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T19:53:51,282 DEBUG [RS:0;86162e2766a8:40645 {}] regionserver.HRegionServer(832): About to register with Master. 
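The master notes above that reopening regions with very high storeFileRefCount stays disabled unless hbase.regions.recovery.store.file.ref.count is set above 0. A one-line sketch of enabling it programmatically; the key is copied verbatim from the log line, while the threshold of 256 is only an example (in a real deployment this would normally live in hbase-site.xml).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key name taken from the master log line; any value > 0 enables the recovery feature.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}
```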
2024-12-05T19:53:51,282 INFO [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(2659): reportForDuty to master=86162e2766a8,46269,1733428430935 with port=40097, startcode=1733428431012 2024-12-05T19:53:51,283 DEBUG [RS:1;86162e2766a8:40097 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T19:53:51,284 INFO [RS:0;86162e2766a8:40645 {}] regionserver.HRegionServer(2659): reportForDuty to master=86162e2766a8,46269,1733428430935 with port=40645, startcode=1733428430982 2024-12-05T19:53:51,284 DEBUG [RS:0;86162e2766a8:40645 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T19:53:51,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741831_1007 (size=1321) 2024-12-05T19:53:51,287 DEBUG [RS:2;86162e2766a8:39485 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;86162e2766a8:39485 2024-12-05T19:53:51,287 INFO [RS:2;86162e2766a8:39485 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T19:53:51,287 INFO [RS:2;86162e2766a8:39485 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T19:53:51,287 DEBUG [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T19:53:51,289 INFO [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(2659): reportForDuty to master=86162e2766a8,46269,1733428430935 with port=39485, startcode=1733428431042 2024-12-05T19:53:51,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741831_1007 (size=1321) 2024-12-05T19:53:51,289 DEBUG [RS:2;86162e2766a8:39485 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T19:53:51,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741831_1007 (size=1321) 2024-12-05T19:53:51,290 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T19:53:51,290 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527 2024-12-05T19:53:51,291 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46861, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T19:53:51,291 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51979, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T19:53:51,291 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56867, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T19:53:51,292 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46269 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 86162e2766a8,40097,1733428431012 2024-12-05T19:53:51,292 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46269 {}] master.ServerManager(517): Registering regionserver=86162e2766a8,40097,1733428431012 2024-12-05T19:53:51,295 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46269 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 86162e2766a8,40645,1733428430982 2024-12-05T19:53:51,295 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46269 {}] master.ServerManager(517): Registering regionserver=86162e2766a8,40645,1733428430982 2024-12-05T19:53:51,296 DEBUG [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527 2024-12-05T19:53:51,296 DEBUG [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42213 2024-12-05T19:53:51,296 DEBUG [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T19:53:51,298 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46269 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 86162e2766a8,39485,1733428431042 2024-12-05T19:53:51,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T19:53:51,298 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46269 {}] master.ServerManager(517): Registering regionserver=86162e2766a8,39485,1733428431042 2024-12-05T19:53:51,299 DEBUG [RS:0;86162e2766a8:40645 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527 2024-12-05T19:53:51,299 DEBUG [RS:0;86162e2766a8:40645 {}] 
regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42213 2024-12-05T19:53:51,299 DEBUG [RS:0;86162e2766a8:40645 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T19:53:51,299 DEBUG [RS:1;86162e2766a8:40097 {}] zookeeper.ZKUtil(111): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/86162e2766a8,40097,1733428431012 2024-12-05T19:53:51,299 WARN [RS:1;86162e2766a8:40097 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T19:53:51,299 INFO [RS:1;86162e2766a8:40097 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T19:53:51,299 DEBUG [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/WALs/86162e2766a8,40097,1733428431012 2024-12-05T19:53:51,301 DEBUG [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527 2024-12-05T19:53:51,301 DEBUG [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42213 2024-12-05T19:53:51,301 DEBUG [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T19:53:51,304 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [86162e2766a8,40097,1733428431012] 2024-12-05T19:53:51,304 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [86162e2766a8,40645,1733428430982] 2024-12-05T19:53:51,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T19:53:51,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741832_1008 (size=32) 2024-12-05T19:53:51,306 DEBUG [RS:0;86162e2766a8:40645 {}] zookeeper.ZKUtil(111): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/86162e2766a8,40645,1733428430982 2024-12-05T19:53:51,306 WARN [RS:0;86162e2766a8:40645 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T19:53:51,306 DEBUG [RS:2;86162e2766a8:39485 {}] zookeeper.ZKUtil(111): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/86162e2766a8,39485,1733428431042 2024-12-05T19:53:51,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741832_1008 (size=32) 2024-12-05T19:53:51,307 WARN [RS:2;86162e2766a8:39485 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
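The "RegionServer ephemeral node created, adding [...]" entries above refer to the ephemeral znodes each region server registers under /hbase/rs; ZooKeeper deletes them automatically when the owning session dies, which is how the master notices a crashed server. A standalone sketch of creating such an ephemeral node with the plain ZooKeeper client; the path merely imitates the log's naming and this is not the HBase registration code.

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralNodeSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64776", 30_000, event -> { });
    // EPHEMERAL: the znode disappears as soon as this session expires or closes.
    String path = zk.create("/hbase/rs/example-host,40645,1733428430982",   // illustrative name
        new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    System.out.println("registered " + path);
    zk.close();   // closing the session removes the ephemeral node
  }
}
```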
2024-12-05T19:53:51,307 INFO [RS:0;86162e2766a8:40645 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T19:53:51,307 INFO [RS:2;86162e2766a8:39485 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T19:53:51,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741832_1008 (size=32) 2024-12-05T19:53:51,307 DEBUG [RS:0;86162e2766a8:40645 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/WALs/86162e2766a8,40645,1733428430982 2024-12-05T19:53:51,307 DEBUG [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/WALs/86162e2766a8,39485,1733428431042 2024-12-05T19:53:51,309 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [86162e2766a8,39485,1733428431042] 2024-12-05T19:53:51,310 INFO [RS:1;86162e2766a8:40097 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T19:53:51,315 INFO [RS:0;86162e2766a8:40645 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T19:53:51,320 INFO [RS:2;86162e2766a8:39485 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T19:53:51,320 INFO [RS:1;86162e2766a8:40097 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T19:53:51,320 INFO [RS:0;86162e2766a8:40645 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T19:53:51,321 INFO [RS:1;86162e2766a8:40097 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T19:53:51,321 INFO [RS:1;86162e2766a8:40097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,327 INFO [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T19:53:51,327 INFO [RS:0;86162e2766a8:40645 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T19:53:51,327 INFO [RS:0;86162e2766a8:40645 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-05T19:53:51,328 INFO [RS:2;86162e2766a8:39485 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T19:53:51,328 INFO [RS:0;86162e2766a8:40645 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T19:53:51,328 INFO [RS:1;86162e2766a8:40097 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T19:53:51,328 INFO [RS:2;86162e2766a8:39485 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T19:53:51,329 INFO [RS:1;86162e2766a8:40097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,329 INFO [RS:2;86162e2766a8:39485 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,329 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,329 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,329 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,329 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,329 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,329 INFO [RS:0;86162e2766a8:40645 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T19:53:51,329 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/86162e2766a8:0, corePoolSize=2, maxPoolSize=2 2024-12-05T19:53:51,329 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,329 INFO [RS:0;86162e2766a8:40645 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
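The MemStoreFlusher lines above report globalMemStoreLimit=880 M with a low-water mark of 836 M. Those two numbers are consistent with the usual derivation of roughly 40% of the JVM heap for the upper bound and 95% of that for the low mark; the heap size and both fractions in this sketch are assumptions chosen only to reproduce the logged arithmetic.

    public class MemStoreLimitMath {
      public static void main(String[] args) {
        long heapBytes = 2200L << 20;     // ~2200 MB test JVM heap (assumed)
        double upperFraction = 0.40;      // hbase.regionserver.global.memstore.size (assumed default)
        double lowerFraction = 0.95;      // hbase.regionserver.global.memstore.size.lower.limit (assumed default)
        long globalLimit = (long) (heapBytes * upperFraction);   // 880 MB, as logged
        long lowMark = (long) (globalLimit * lowerFraction);     // 836 MB, as logged
        System.out.println((globalLimit >> 20) + " MB / " + (lowMark >> 20) + " MB");
      }
    }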
2024-12-05T19:53:51,329 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,330 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,330 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,330 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,330 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,330 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,330 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,330 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,330 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/86162e2766a8:0, corePoolSize=3, maxPoolSize=3 2024-12-05T19:53:51,330 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,330 DEBUG [RS:1;86162e2766a8:40097 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0, corePoolSize=3, maxPoolSize=3 2024-12-05T19:53:51,330 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,330 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/86162e2766a8:0, corePoolSize=2, maxPoolSize=2 2024-12-05T19:53:51,330 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,330 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,331 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,331 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,331 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,331 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,331 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/86162e2766a8:0, corePoolSize=3, maxPoolSize=3 2024-12-05T19:53:51,331 DEBUG [RS:0;86162e2766a8:40645 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0, corePoolSize=3, maxPoolSize=3 2024-12-05T19:53:51,332 INFO [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T19:53:51,333 INFO [RS:2;86162e2766a8:39485 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T19:53:51,333 INFO [RS:2;86162e2766a8:39485 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,333 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,333 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,333 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,333 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,333 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,333 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/86162e2766a8:0, corePoolSize=2, maxPoolSize=2 2024-12-05T19:53:51,334 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,334 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,334 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,334 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,334 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 2024-12-05T19:53:51,334 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/86162e2766a8:0, corePoolSize=1, maxPoolSize=1 
2024-12-05T19:53:51,334 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/86162e2766a8:0, corePoolSize=3, maxPoolSize=3 2024-12-05T19:53:51,334 DEBUG [RS:2;86162e2766a8:39485 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0, corePoolSize=3, maxPoolSize=3 2024-12-05T19:53:51,337 INFO [RS:1;86162e2766a8:40097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,337 INFO [RS:1;86162e2766a8:40097 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,337 INFO [RS:0;86162e2766a8:40645 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:1;86162e2766a8:40097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:0;86162e2766a8:40645 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:1;86162e2766a8:40097 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:0;86162e2766a8:40645 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:0;86162e2766a8:40645 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:2;86162e2766a8:39485 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:1;86162e2766a8:40097 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:0;86162e2766a8:40645 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:2;86162e2766a8:39485 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:1;86162e2766a8:40097 {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,40097,1733428431012-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:2;86162e2766a8:39485 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:0;86162e2766a8:40645 {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,40645,1733428430982-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:2;86162e2766a8:39485 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,338 INFO [RS:2;86162e2766a8:39485 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
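Each "Starting executor service name=RS_..." line above creates a small bounded thread pool dedicated to one event type (open region, close region, log replay, snapshot, flush, and so on), with the core and max sizes printed in the message. A rough java.util.concurrent equivalent of one of the single-threaded pools, offered as an analogy rather than as the HBase ExecutorService API itself:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class OpenRegionPoolAnalogy {
      public static void main(String[] args) throws InterruptedException {
        // Analogous to "RS_OPEN_REGION ... corePoolSize=1, maxPoolSize=1":
        // a single worker thread that serializes open-region handlers.
        ExecutorService openRegionPool = Executors.newFixedThreadPool(1);
        openRegionPool.submit(() -> System.out.println("open region task"));
        openRegionPool.shutdown();
        openRegionPool.awaitTermination(5, TimeUnit.SECONDS);
      }
    }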
2024-12-05T19:53:51,338 INFO [RS:2;86162e2766a8:39485 {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,39485,1733428431042-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T19:53:51,355 INFO [RS:2;86162e2766a8:39485 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T19:53:51,355 INFO [RS:2;86162e2766a8:39485 {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,39485,1733428431042-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,355 INFO [RS:2;86162e2766a8:39485 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,355 INFO [RS:2;86162e2766a8:39485 {}] regionserver.Replication(171): 86162e2766a8,39485,1733428431042 started 2024-12-05T19:53:51,362 INFO [RS:0;86162e2766a8:40645 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T19:53:51,363 INFO [RS:1;86162e2766a8:40097 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T19:53:51,363 INFO [RS:0;86162e2766a8:40645 {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,40645,1733428430982-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,363 INFO [RS:0;86162e2766a8:40645 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,363 INFO [RS:1;86162e2766a8:40097 {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,40097,1733428431012-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,363 INFO [RS:0;86162e2766a8:40645 {}] regionserver.Replication(171): 86162e2766a8,40645,1733428430982 started 2024-12-05T19:53:51,363 INFO [RS:1;86162e2766a8:40097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,363 INFO [RS:1;86162e2766a8:40097 {}] regionserver.Replication(171): 86162e2766a8,40097,1733428431012 started 2024-12-05T19:53:51,370 INFO [RS:2;86162e2766a8:39485 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
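The ChoreService lines that follow the executor setup register periodic background tasks: CompactionChecker and MemstoreFlusherChore every 1,000 ms, ExecutorStatusChore every 60,000 ms, nonceCleaner every 360,000 ms, plus the HeapMemoryTunerChore and replication statistics chores just above. The same scheduling pattern, sketched with a plain ScheduledExecutorService instead of the HBase ChoreService API:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalogy {
      public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);
        // Mirrors "CompactionChecker, period=1000, unit=MILLISECONDS is enabled." above.
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("compaction check"), 0, 1000, TimeUnit.MILLISECONDS);
      }
    }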
2024-12-05T19:53:51,371 INFO [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(1482): Serving as 86162e2766a8,39485,1733428431042, RpcServer on 86162e2766a8/172.17.0.2:39485, sessionid=0x10063be81290003 2024-12-05T19:53:51,371 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T19:53:51,371 DEBUG [RS:2;86162e2766a8:39485 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 86162e2766a8,39485,1733428431042 2024-12-05T19:53:51,371 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '86162e2766a8,39485,1733428431042' 2024-12-05T19:53:51,371 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T19:53:51,372 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T19:53:51,372 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T19:53:51,372 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T19:53:51,372 DEBUG [RS:2;86162e2766a8:39485 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 86162e2766a8,39485,1733428431042 2024-12-05T19:53:51,372 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '86162e2766a8,39485,1733428431042' 2024-12-05T19:53:51,372 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T19:53:51,373 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T19:53:51,373 DEBUG [RS:2;86162e2766a8:39485 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T19:53:51,373 INFO [RS:2;86162e2766a8:39485 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T19:53:51,373 INFO [RS:2;86162e2766a8:39485 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T19:53:51,385 INFO [RS:0;86162e2766a8:40645 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:51,385 INFO [RS:1;86162e2766a8:40097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
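RS:2 logs "Quota support disabled" for both the RPC and space quota managers, so neither is started. A minimal sketch of flipping that switch, assuming hbase.quota.enabled is the controlling key as in stock HBase:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Left at false (the assumed default), RegionServerRpcQuotaManager and
        // RegionServerSpaceQuotaManager log the "Quota support disabled" lines above.
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println("quota enabled = " + conf.getBoolean("hbase.quota.enabled", false));
      }
    }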
2024-12-05T19:53:51,385 INFO [RS:0;86162e2766a8:40645 {}] regionserver.HRegionServer(1482): Serving as 86162e2766a8,40645,1733428430982, RpcServer on 86162e2766a8/172.17.0.2:40645, sessionid=0x10063be81290001 2024-12-05T19:53:51,385 INFO [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(1482): Serving as 86162e2766a8,40097,1733428431012, RpcServer on 86162e2766a8/172.17.0.2:40097, sessionid=0x10063be81290002 2024-12-05T19:53:51,386 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T19:53:51,386 DEBUG [RS:0;86162e2766a8:40645 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 86162e2766a8,40645,1733428430982 2024-12-05T19:53:51,386 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T19:53:51,386 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '86162e2766a8,40645,1733428430982' 2024-12-05T19:53:51,386 DEBUG [RS:1;86162e2766a8:40097 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 86162e2766a8,40097,1733428431012 2024-12-05T19:53:51,386 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T19:53:51,386 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '86162e2766a8,40097,1733428431012' 2024-12-05T19:53:51,386 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T19:53:51,386 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T19:53:51,386 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T19:53:51,387 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T19:53:51,387 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T19:53:51,387 DEBUG [RS:0;86162e2766a8:40645 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 86162e2766a8,40645,1733428430982 2024-12-05T19:53:51,387 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '86162e2766a8,40645,1733428430982' 2024-12-05T19:53:51,387 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T19:53:51,387 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T19:53:51,387 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T19:53:51,387 DEBUG [RS:1;86162e2766a8:40097 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 86162e2766a8,40097,1733428431012 2024-12-05T19:53:51,387 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '86162e2766a8,40097,1733428431012' 2024-12-05T19:53:51,387 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T19:53:51,387 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T19:53:51,388 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T19:53:51,388 DEBUG [RS:0;86162e2766a8:40645 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T19:53:51,388 INFO [RS:0;86162e2766a8:40645 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T19:53:51,388 INFO [RS:0;86162e2766a8:40645 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T19:53:51,388 DEBUG [RS:1;86162e2766a8:40097 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T19:53:51,388 INFO [RS:1;86162e2766a8:40097 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T19:53:51,388 INFO [RS:1;86162e2766a8:40097 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T19:53:51,477 INFO [RS:2;86162e2766a8:39485 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=86162e2766a8%2C39485%2C1733428431042, suffix=, logDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/WALs/86162e2766a8,39485,1733428431042, archiveDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/oldWALs, maxLogs=32 2024-12-05T19:53:51,479 INFO [RS:2;86162e2766a8:39485 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 86162e2766a8%2C39485%2C1733428431042.1733428431479 2024-12-05T19:53:51,489 INFO [RS:2;86162e2766a8:39485 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/WALs/86162e2766a8,39485,1733428431042/86162e2766a8%2C39485%2C1733428431042.1733428431479 2024-12-05T19:53:51,491 DEBUG [RS:2;86162e2766a8:39485 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41515:41515),(127.0.0.1/127.0.0.1:35159:35159),(127.0.0.1/127.0.0.1:40321:40321)] 2024-12-05T19:53:51,491 INFO [RS:0;86162e2766a8:40645 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=86162e2766a8%2C40645%2C1733428430982, suffix=, logDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/WALs/86162e2766a8,40645,1733428430982, archiveDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/oldWALs, maxLogs=32 2024-12-05T19:53:51,491 INFO [RS:1;86162e2766a8:40097 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=86162e2766a8%2C40097%2C1733428431012, suffix=, logDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/WALs/86162e2766a8,40097,1733428431012, archiveDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/oldWALs, maxLogs=32 2024-12-05T19:53:51,496 INFO [RS:1;86162e2766a8:40097 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 86162e2766a8%2C40097%2C1733428431012.1733428431495 2024-12-05T19:53:51,496 INFO [RS:0;86162e2766a8:40645 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 86162e2766a8%2C40645%2C1733428430982.1733428431495 2024-12-05T19:53:51,507 INFO [RS:1;86162e2766a8:40097 
{}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/WALs/86162e2766a8,40097,1733428431012/86162e2766a8%2C40097%2C1733428431012.1733428431495 2024-12-05T19:53:51,508 INFO [RS:0;86162e2766a8:40645 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/WALs/86162e2766a8,40645,1733428430982/86162e2766a8%2C40645%2C1733428430982.1733428431495 2024-12-05T19:53:51,518 DEBUG [RS:1;86162e2766a8:40097 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41515:41515),(127.0.0.1/127.0.0.1:35159:35159),(127.0.0.1/127.0.0.1:40321:40321)] 2024-12-05T19:53:51,520 DEBUG [RS:0;86162e2766a8:40645 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41515:41515),(127.0.0.1/127.0.0.1:35159:35159),(127.0.0.1/127.0.0.1:40321:40321)] 2024-12-05T19:53:51,707 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T19:53:51,710 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T19:53:51,711 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T19:53:51,712 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:51,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:51,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T19:53:51,714 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T19:53:51,714 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:51,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:51,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T19:53:51,716 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T19:53:51,716 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:51,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:51,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T19:53:51,719 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T19:53:51,719 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:51,720 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:51,720 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T19:53:51,721 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740 2024-12-05T19:53:51,721 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740 2024-12-05T19:53:51,723 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T19:53:51,723 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T19:53:51,723 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T19:53:51,725 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T19:53:51,727 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T19:53:51,728 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69855927, jitterRate=0.040934428572654724}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T19:53:51,729 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733428431708Initializing all the Stores at 1733428431709 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428431709Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428431709Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733428431709Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428431709Cleaning up temporary data from old regions at 1733428431723 (+14 ms)Region opened successfully at 1733428431729 (+6 ms) 2024-12-05T19:53:51,729 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T19:53:51,729 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T19:53:51,729 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T19:53:51,729 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T19:53:51,729 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T19:53:51,729 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T19:53:51,730 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733428431729Disabling compacts and flushes for region at 1733428431729Disabling writes for close at 1733428431729Writing region close event to WAL at 1733428431729Closed at 1733428431729 2024-12-05T19:53:51,731 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T19:53:51,731 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T19:53:51,732 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T19:53:51,734 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T19:53:51,736 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T19:53:51,886 DEBUG [86162e2766a8:46269 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-05T19:53:51,886 DEBUG [86162e2766a8:46269 {}] balancer.BalancerClusterState(204): Hosts are {86162e2766a8=0} racks are {/default-rack=0} 2024-12-05T19:53:51,889 DEBUG [86162e2766a8:46269 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T19:53:51,889 DEBUG [86162e2766a8:46269 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T19:53:51,889 DEBUG [86162e2766a8:46269 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T19:53:51,889 DEBUG [86162e2766a8:46269 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T19:53:51,889 DEBUG [86162e2766a8:46269 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T19:53:51,889 DEBUG [86162e2766a8:46269 {}] balancer.BalancerClusterState(310): server 2 is on host 0 
2024-12-05T19:53:51,889 INFO [86162e2766a8:46269 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T19:53:51,889 INFO [86162e2766a8:46269 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T19:53:51,889 INFO [86162e2766a8:46269 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T19:53:51,889 DEBUG [86162e2766a8:46269 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T19:53:51,890 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=86162e2766a8,39485,1733428431042 2024-12-05T19:53:51,892 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 86162e2766a8,39485,1733428431042, state=OPENING 2024-12-05T19:53:51,894 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T19:53:51,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:51,896 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T19:53:51,896 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:51,896 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=86162e2766a8,39485,1733428431042}] 2024-12-05T19:53:51,896 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:51,896 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:51,897 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:52,053 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T19:53:52,054 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33629, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=AdminService 2024-12-05T19:53:52,060 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T19:53:52,060 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T19:53:52,063 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=86162e2766a8%2C39485%2C1733428431042.meta, suffix=.meta, logDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/WALs/86162e2766a8,39485,1733428431042, archiveDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/oldWALs, maxLogs=32 2024-12-05T19:53:52,064 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 86162e2766a8%2C39485%2C1733428431042.meta.1733428432064.meta 2024-12-05T19:53:52,078 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/WALs/86162e2766a8,39485,1733428431042/86162e2766a8%2C39485%2C1733428431042.meta.1733428432064.meta 2024-12-05T19:53:52,083 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41515:41515),(127.0.0.1/127.0.0.1:35159:35159),(127.0.0.1/127.0.0.1:40321:40321)] 2024-12-05T19:53:52,084 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T19:53:52,085 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T19:53:52,085 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T19:53:52,085 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
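The WAL configuration logged for the meta WAL just above (and for the per-server WALs earlier) reads blocksize=256 MB, rollsize=128 MB, maxLogs=32; the roll size is simply the block size scaled by a roll multiplier of 0.5. The key names in the sketch (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs) are the stock ones but are assumptions to confirm against the release in use.

    public class WalRollSizeMath {
      public static void main(String[] args) {
        long blockSize = 256L << 20;   // hbase.regionserver.hlog.blocksize (256 MB here)
        double rollMultiplier = 0.5;   // hbase.regionserver.logroll.multiplier (assumed default)
        int maxLogs = 32;              // hbase.regionserver.maxlogs
        long rollSize = (long) (blockSize * rollMultiplier);
        System.out.println("rollsize = " + (rollSize >> 20) + " MB, maxLogs = " + maxLogs);
      }
    }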
2024-12-05T19:53:52,085 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T19:53:52,085 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T19:53:52,085 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T19:53:52,085 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T19:53:52,088 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T19:53:52,089 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T19:53:52,089 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:52,090 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:52,090 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T19:53:52,091 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T19:53:52,091 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:52,092 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:52,092 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T19:53:52,093 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T19:53:52,093 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:52,094 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T19:53:52,094 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T19:53:52,095 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T19:53:52,095 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:52,095 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
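The CompactionConfiguration lines repeated for each column family of 1588230740 report minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000 and off-peak ratio 5.000000. Those values correspond to a handful of hbase.hstore.compaction.* settings; the key names below are taken from stock HBase and should be double-checked before relying on them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact in the log
        conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
        System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
      }
    }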
2024-12-05T19:53:52,096 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T19:53:52,097 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740 2024-12-05T19:53:52,098 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740 2024-12-05T19:53:52,100 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T19:53:52,100 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T19:53:52,100 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T19:53:52,102 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T19:53:52,104 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68115755, jitterRate=0.01500384509563446}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T19:53:52,104 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T19:53:52,105 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733428432086Writing region info on filesystem at 1733428432086Initializing all the Stores at 1733428432087 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428432087Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428432087Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733428432087Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733428432087Cleaning up temporary data from old regions at 1733428432100 (+13 ms)Running coprocessor post-open hooks at 1733428432104 (+4 ms)Region opened successfully at 1733428432105 (+1 ms) 2024-12-05T19:53:52,107 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733428432052 2024-12-05T19:53:52,111 DEBUG [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T19:53:52,111 INFO [RS_OPEN_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T19:53:52,113 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=86162e2766a8,39485,1733428431042 2024-12-05T19:53:52,114 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 86162e2766a8,39485,1733428431042, state=OPEN 2024-12-05T19:53:52,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T19:53:52,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T19:53:52,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T19:53:52,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T19:53:52,116 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:52,116 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=86162e2766a8,39485,1733428431042 2024-12-05T19:53:52,116 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:52,116 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:52,117 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T19:53:52,121 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T19:53:52,121 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=86162e2766a8,39485,1733428431042 in 220 msec 2024-12-05T19:53:52,125 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T19:53:52,125 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 389 msec 2024-12-05T19:53:52,127 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T19:53:52,127 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T19:53:52,129 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T19:53:52,129 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=86162e2766a8,39485,1733428431042, seqNum=-1] 2024-12-05T19:53:52,130 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T19:53:52,131 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40259, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T19:53:52,140 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 895 msec 2024-12-05T19:53:52,141 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733428432141, completionTime=-1 2024-12-05T19:53:52,141 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-05T19:53:52,141 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-05T19:53:52,143 INFO [master/86162e2766a8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-05T19:53:52,144 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733428492143 2024-12-05T19:53:52,144 INFO [master/86162e2766a8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733428552144 2024-12-05T19:53:52,144 INFO [master/86162e2766a8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-05T19:53:52,144 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,46269,1733428430935-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:52,144 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,46269,1733428430935-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:52,144 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,46269,1733428430935-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:52,144 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-86162e2766a8:46269, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:52,144 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:52,145 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:52,147 DEBUG [master/86162e2766a8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T19:53:52,150 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.070sec 2024-12-05T19:53:52,150 INFO [master/86162e2766a8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T19:53:52,150 INFO [master/86162e2766a8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T19:53:52,150 INFO [master/86162e2766a8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T19:53:52,150 INFO [master/86162e2766a8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-05T19:53:52,150 INFO [master/86162e2766a8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T19:53:52,150 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,46269,1733428430935-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
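For orientation, the "Finished waiting on RegionServer count=3" and "Master has completed initialization" entries above are the tail end of the mini-cluster startup that the test drives from its setup hook. A minimal sketch of such a hook follows; it is not taken from the test source, and it assumes the startMiniCluster(int) overload familiar from HBaseTestingUtility is available on HBaseTestingUtil, with the field name chosen only for illustration.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.Before;

public class MiniClusterSetUpSketch {
  // Field name and construction are assumptions for illustration only.
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts ZooKeeper, an HDFS mini cluster and an HBase mini cluster with
    // three region servers -- matching the "expected min=3 server(s),
    // max=3 server(s)" wait logged above.
    util.startMiniCluster(3);
  }
}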
2024-12-05T19:53:52,150 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,46269,1733428430935-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T19:53:52,153 DEBUG [master/86162e2766a8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T19:53:52,153 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T19:53:52,154 INFO [master/86162e2766a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=86162e2766a8,46269,1733428430935-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T19:53:52,160 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f519320, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T19:53:52,161 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 86162e2766a8,46269,-1 for getting cluster id 2024-12-05T19:53:52,161 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T19:53:52,162 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '62ccd1a9-18a9-4ac6-802c-88ff2711da26' 2024-12-05T19:53:52,163 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T19:53:52,163 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "62ccd1a9-18a9-4ac6-802c-88ff2711da26" 2024-12-05T19:53:52,163 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20a72f38, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T19:53:52,163 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [86162e2766a8,46269,-1] 2024-12-05T19:53:52,164 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T19:53:52,164 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:52,166 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36982, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T19:53:52,167 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3515910a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T19:53:52,167 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T19:53:52,169 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=86162e2766a8,39485,1733428431042, seqNum=-1] 2024-12-05T19:53:52,170 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T19:53:52,171 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38496, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T19:53:52,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=86162e2766a8,46269,1733428430935 2024-12-05T19:53:52,175 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T19:53:52,176 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 86162e2766a8,46269,1733428430935 2024-12-05T19:53:52,176 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@659d6689 2024-12-05T19:53:52,177 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T19:53:52,179 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36992, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T19:53:52,180 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46269 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T19:53:52,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46269 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-05T19:53:52,184 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T19:53:52,184 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:52,184 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46269 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-05T19:53:52,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46269 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T19:53:52,186 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T19:53:52,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741837_1013 (size=392) 
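The request logged above as create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', ...} is what the master receives when a client issues an ordinary Admin.createTable call. A minimal client-side sketch of such a call (connection bootstrap included for completeness; not taken from the test source):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // One column family 'cf' with default settings -- matching the
      // descriptor printed by HMaster in the log entry above.
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      admin.createTable(desc);   // drives the CreateTableProcedure (pid=4) seen above
    }
  }
}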
2024-12-05T19:53:52,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741837_1013 (size=392) 2024-12-05T19:53:52,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741837_1013 (size=392) 2024-12-05T19:53:52,209 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0e62de7f5356c8f81409a410063cbc54, NAME => 'TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527 2024-12-05T19:53:52,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741838_1014 (size=51) 2024-12-05T19:53:52,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741838_1014 (size=51) 2024-12-05T19:53:52,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741838_1014 (size=51) 2024-12-05T19:53:52,223 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T19:53:52,223 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 0e62de7f5356c8f81409a410063cbc54, disabling compactions & flushes 2024-12-05T19:53:52,223 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 2024-12-05T19:53:52,223 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 2024-12-05T19:53:52,223 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. after waiting 0 ms 2024-12-05T19:53:52,223 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 2024-12-05T19:53:52,223 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 
2024-12-05T19:53:52,223 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0e62de7f5356c8f81409a410063cbc54: Waiting for close lock at 1733428432223Disabling compacts and flushes for region at 1733428432223Disabling writes for close at 1733428432223Writing region close event to WAL at 1733428432223Closed at 1733428432223 2024-12-05T19:53:52,226 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T19:53:52,226 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733428432226"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733428432226"}]},"ts":"1733428432226"} 2024-12-05T19:53:52,229 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-05T19:53:52,231 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T19:53:52,231 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733428432231"}]},"ts":"1733428432231"} 2024-12-05T19:53:52,235 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-05T19:53:52,235 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {86162e2766a8=0} racks are {/default-rack=0} 2024-12-05T19:53:52,236 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T19:53:52,236 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T19:53:52,236 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T19:53:52,236 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T19:53:52,236 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T19:53:52,236 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T19:53:52,236 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T19:53:52,236 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T19:53:52,236 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T19:53:52,236 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T19:53:52,237 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0e62de7f5356c8f81409a410063cbc54, ASSIGN}] 2024-12-05T19:53:52,239 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0e62de7f5356c8f81409a410063cbc54, ASSIGN 2024-12-05T19:53:52,241 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0e62de7f5356c8f81409a410063cbc54, ASSIGN; state=OFFLINE, location=86162e2766a8,40097,1733428431012; forceNewPlan=false, retain=false 2024-12-05T19:53:52,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46269 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T19:53:52,392 INFO [86162e2766a8:46269 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-05T19:53:52,392 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0e62de7f5356c8f81409a410063cbc54, regionState=OPENING, regionLocation=86162e2766a8,40097,1733428431012 2024-12-05T19:53:52,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0e62de7f5356c8f81409a410063cbc54, ASSIGN because future has completed 2024-12-05T19:53:52,398 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e62de7f5356c8f81409a410063cbc54, server=86162e2766a8,40097,1733428431012}] 2024-12-05T19:53:52,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46269 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T19:53:52,553 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T19:53:52,555 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43529, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T19:53:52,559 INFO [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 
2024-12-05T19:53:52,559 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0e62de7f5356c8f81409a410063cbc54, NAME => 'TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54.', STARTKEY => '', ENDKEY => ''} 2024-12-05T19:53:52,560 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 0e62de7f5356c8f81409a410063cbc54 2024-12-05T19:53:52,560 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T19:53:52,560 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0e62de7f5356c8f81409a410063cbc54 2024-12-05T19:53:52,560 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0e62de7f5356c8f81409a410063cbc54 2024-12-05T19:53:52,562 INFO [StoreOpener-0e62de7f5356c8f81409a410063cbc54-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0e62de7f5356c8f81409a410063cbc54 2024-12-05T19:53:52,563 INFO [StoreOpener-0e62de7f5356c8f81409a410063cbc54-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0e62de7f5356c8f81409a410063cbc54 columnFamilyName cf 2024-12-05T19:53:52,563 DEBUG [StoreOpener-0e62de7f5356c8f81409a410063cbc54-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T19:53:52,564 INFO [StoreOpener-0e62de7f5356c8f81409a410063cbc54-1 {}] regionserver.HStore(327): Store=0e62de7f5356c8f81409a410063cbc54/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T19:53:52,564 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0e62de7f5356c8f81409a410063cbc54 2024-12-05T19:53:52,565 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/default/TestHBaseWalOnEC/0e62de7f5356c8f81409a410063cbc54 2024-12-05T19:53:52,566 DEBUG 
[RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/default/TestHBaseWalOnEC/0e62de7f5356c8f81409a410063cbc54 2024-12-05T19:53:52,566 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0e62de7f5356c8f81409a410063cbc54 2024-12-05T19:53:52,566 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0e62de7f5356c8f81409a410063cbc54 2024-12-05T19:53:52,568 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0e62de7f5356c8f81409a410063cbc54 2024-12-05T19:53:52,570 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/default/TestHBaseWalOnEC/0e62de7f5356c8f81409a410063cbc54/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T19:53:52,571 INFO [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0e62de7f5356c8f81409a410063cbc54; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69958327, jitterRate=0.042460307478904724}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T19:53:52,571 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0e62de7f5356c8f81409a410063cbc54 2024-12-05T19:53:52,572 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0e62de7f5356c8f81409a410063cbc54: Running coprocessor pre-open hook at 1733428432560Writing region info on filesystem at 1733428432560Initializing all the Stores at 1733428432561 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733428432561Cleaning up temporary data from old regions at 1733428432566 (+5 ms)Running coprocessor post-open hooks at 1733428432571 (+5 ms)Region opened successfully at 1733428432572 (+1 ms) 2024-12-05T19:53:52,574 INFO [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54., pid=6, masterSystemTime=1733428432552 2024-12-05T19:53:52,577 DEBUG [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 2024-12-05T19:53:52,578 INFO [RS_OPEN_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 
2024-12-05T19:53:52,579 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0e62de7f5356c8f81409a410063cbc54, regionState=OPEN, openSeqNum=2, regionLocation=86162e2766a8,40097,1733428431012 2024-12-05T19:53:52,582 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e62de7f5356c8f81409a410063cbc54, server=86162e2766a8,40097,1733428431012 because future has completed 2024-12-05T19:53:52,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T19:53:52,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0e62de7f5356c8f81409a410063cbc54, server=86162e2766a8,40097,1733428431012 in 187 msec 2024-12-05T19:53:52,593 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T19:53:52,594 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0e62de7f5356c8f81409a410063cbc54, ASSIGN in 352 msec 2024-12-05T19:53:52,595 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T19:53:52,596 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733428432595"}]},"ts":"1733428432595"} 2024-12-05T19:53:52,599 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-05T19:53:52,601 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T19:53:52,604 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 421 msec 2024-12-05T19:53:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46269 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T19:53:52,815 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T19:53:52,815 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-05T19:53:52,816 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T19:53:52,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-05T19:53:52,819 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T19:53:52,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
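The "Waiting until all regions of table TestHBaseWalOnEC get assigned" check above is the usual test-utility barrier between table creation and the first write. Sketched below under the assumption that waitUntilAllRegionsAssigned(TableName) is available on HBaseTestingUtil as it is on the older HBaseTestingUtility; the helper shape is illustrative only.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;

public class WaitForAssignmentSketch {
  // 'util' is assumed to be the same instance that started the mini cluster.
  static void waitForTable(HBaseTestingUtil util) throws Exception {
    // Blocks (up to the 60,000 ms timeout logged above) until every region of
    // the table is recorded as OPEN in hbase:meta and in the AssignmentManager.
    util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"));
  }
}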
2024-12-05T19:53:52,823 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54., hostname=86162e2766a8,40097,1733428431012, seqNum=2] 2024-12-05T19:53:52,823 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T19:53:52,825 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55042, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T19:53:52,829 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46269 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-05T19:53:52,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46269 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-05T19:53:52,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46269 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T19:53:52,833 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-05T19:53:52,834 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-05T19:53:52,834 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-05T19:53:52,849 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-05T19:53:52,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T19:53:52,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T19:53:52,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T19:53:52,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46269 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T19:53:52,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40097 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-05T19:53:52,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 
2024-12-05T19:53:52,990 INFO [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 0e62de7f5356c8f81409a410063cbc54 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-05T19:53:53,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/default/TestHBaseWalOnEC/0e62de7f5356c8f81409a410063cbc54/.tmp/cf/19a05eed578b4f3a9aa1afe90c6c0ba0 is 36, key is row/cf:cq/1733428432826/Put/seqid=0 2024-12-05T19:53:53,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741839_1015 (size=4787) 2024-12-05T19:53:53,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741839_1015 (size=4787) 2024-12-05T19:53:53,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741839_1015 (size=4787) 2024-12-05T19:53:53,037 INFO [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/default/TestHBaseWalOnEC/0e62de7f5356c8f81409a410063cbc54/.tmp/cf/19a05eed578b4f3a9aa1afe90c6c0ba0 2024-12-05T19:53:53,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/default/TestHBaseWalOnEC/0e62de7f5356c8f81409a410063cbc54/.tmp/cf/19a05eed578b4f3a9aa1afe90c6c0ba0 as hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/default/TestHBaseWalOnEC/0e62de7f5356c8f81409a410063cbc54/cf/19a05eed578b4f3a9aa1afe90c6c0ba0 2024-12-05T19:53:53,064 INFO [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/default/TestHBaseWalOnEC/0e62de7f5356c8f81409a410063cbc54/cf/19a05eed578b4f3a9aa1afe90c6c0ba0, entries=1, sequenceid=5, filesize=4.7 K 2024-12-05T19:53:53,069 INFO [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 0e62de7f5356c8f81409a410063cbc54 in 79ms, sequenceid=5, compaction requested=false 2024-12-05T19:53:53,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 0e62de7f5356c8f81409a410063cbc54: 2024-12-05T19:53:53,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 
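The flush above writes a single ~32-byte cell whose key, row/cf:cq, is visible in the HFileWriterImpl entry; that is the server side of a client Put followed by an admin-triggered flush. A minimal sketch of that client sequence (the cell value is assumed, since the log records only the key; not taken from the test source):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Write the single cell whose key ("row", family "cf", qualifier "cq")
      // appears in the HFileWriterImpl log line above; the value is assumed.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Force the memstore to an HFile; this corresponds to the
      // FlushTableProcedure (pid=7) that the master logs above.
      admin.flush(tn);
    }
  }
}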
2024-12-05T19:53:53,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/86162e2766a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-05T19:53:53,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46269 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-05T19:53:53,076 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-05T19:53:53,076 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 238 msec 2024-12-05T19:53:53,080 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 248 msec 2024-12-05T19:53:53,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46269 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T19:53:53,156 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T19:53:53,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T19:53:53,162 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T19:53:53,162 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T19:53:53,162 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:53,162 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:53,163 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T19:53:53,163 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=257726740, stopped=false 2024-12-05T19:53:53,163 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=86162e2766a8,46269,1733428430935 2024-12-05T19:53:53,163 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
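The call stack above shows the shutdown being driven from TestHBaseWalOnEC.tearDown via HBaseTestingUtil.shutdownMiniCluster inside a JUnit after-hook. A minimal sketch of such a hook (field name and construction are assumptions for illustration; the real test shares the utility instance with its setup):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class MiniClusterTearDownSketch {
  // Assumed to be the same HBaseTestingUtil instance that started the cluster.
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Closes the shared cluster connection, stops the master and all region
    // servers, then shuts down HDFS and ZooKeeper -- the sequence that the
    // "Shutting down minicluster" entries above begin.
    util.shutdownMiniCluster();
  }
}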
2024-12-05T19:53:53,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:53,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:53,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:53,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:53,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:53,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T19:53:53,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:53,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:53,165 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T19:53:53,166 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T19:53:53,166 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T19:53:53,166 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:53,166 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '86162e2766a8,40645,1733428430982' ***** 2024-12-05T19:53:53,166 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T19:53:53,166 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '86162e2766a8,40097,1733428431012' ***** 2024-12-05T19:53:53,166 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:53,166 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T19:53:53,166 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '86162e2766a8,39485,1733428431042' ***** 2024-12-05T19:53:53,167 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T19:53:53,167 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:53,167 INFO [RS:2;86162e2766a8:39485 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T19:53:53,167 INFO [RS:2;86162e2766a8:39485 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T19:53:53,167 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:53,167 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T19:53:53,167 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T19:53:53,168 INFO [RS:2;86162e2766a8:39485 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T19:53:53,168 INFO [RS:0;86162e2766a8:40645 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T19:53:53,168 INFO [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(959): stopping server 86162e2766a8,39485,1733428431042 2024-12-05T19:53:53,168 INFO [RS:1;86162e2766a8:40097 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T19:53:53,168 INFO [RS:2;86162e2766a8:39485 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T19:53:53,168 INFO [RS:0;86162e2766a8:40645 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T19:53:53,168 INFO [RS:2;86162e2766a8:39485 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;86162e2766a8:39485. 2024-12-05T19:53:53,168 INFO [RS:0;86162e2766a8:40645 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-05T19:53:53,168 INFO [RS:1;86162e2766a8:40097 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T19:53:53,168 INFO [RS:0;86162e2766a8:40645 {}] regionserver.HRegionServer(959): stopping server 86162e2766a8,40645,1733428430982 2024-12-05T19:53:53,168 INFO [RS:1;86162e2766a8:40097 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T19:53:53,168 DEBUG [RS:2;86162e2766a8:39485 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T19:53:53,168 INFO [RS:0;86162e2766a8:40645 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T19:53:53,168 INFO [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(3091): Received CLOSE for 0e62de7f5356c8f81409a410063cbc54 2024-12-05T19:53:53,168 DEBUG [RS:2;86162e2766a8:39485 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:53,168 INFO [RS:0;86162e2766a8:40645 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;86162e2766a8:40645. 2024-12-05T19:53:53,168 INFO [RS:2;86162e2766a8:39485 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T19:53:53,168 INFO [RS:2;86162e2766a8:39485 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-05T19:53:53,168 DEBUG [RS:0;86162e2766a8:40645 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T19:53:53,168 INFO [RS:2;86162e2766a8:39485 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T19:53:53,168 DEBUG [RS:0;86162e2766a8:40645 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:53,168 INFO [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T19:53:53,169 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T19:53:53,169 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T19:53:53,169 INFO [RS:0;86162e2766a8:40645 {}] regionserver.HRegionServer(976): stopping server 86162e2766a8,40645,1733428430982; all regions closed. 
2024-12-05T19:53:53,169 INFO [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-05T19:53:53,170 DEBUG [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-05T19:53:53,170 DEBUG [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T19:53:53,170 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T19:53:53,170 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T19:53:53,170 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T19:53:53,170 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,170 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T19:53:53,170 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,170 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T19:53:53,170 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,171 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,171 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,172 INFO [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(959): stopping server 86162e2766a8,40097,1733428431012 2024-12-05T19:53:53,172 INFO [RS:1;86162e2766a8:40097 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T19:53:53,172 INFO [RS:1;86162e2766a8:40097 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;86162e2766a8:40097. 2024-12-05T19:53:53,172 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0e62de7f5356c8f81409a410063cbc54, disabling compactions & flushes 2024-12-05T19:53:53,172 INFO [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 2024-12-05T19:53:53,172 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 2024-12-05T19:53:53,172 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. after waiting 0 ms 2024-12-05T19:53:53,172 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 
2024-12-05T19:53:53,175 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-05T19:53:53,175 DEBUG [RS:1;86162e2766a8:40097 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T19:53:53,175 DEBUG [RS:1;86162e2766a8:40097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:53,175 INFO [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-05T19:53:53,175 DEBUG [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(1325): Online Regions={0e62de7f5356c8f81409a410063cbc54=TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54.} 2024-12-05T19:53:53,175 DEBUG [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(1351): Waiting on 0e62de7f5356c8f81409a410063cbc54 2024-12-05T19:53:53,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741835_1011 (size=93) 2024-12-05T19:53:53,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741835_1011 (size=93) 2024-12-05T19:53:53,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741835_1011 (size=93) 2024-12-05T19:53:53,183 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/default/TestHBaseWalOnEC/0e62de7f5356c8f81409a410063cbc54/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T19:53:53,184 INFO [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 
2024-12-05T19:53:53,184 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0e62de7f5356c8f81409a410063cbc54: Waiting for close lock at 1733428433172Running coprocessor pre-close hooks at 1733428433172Disabling compacts and flushes for region at 1733428433172Disabling writes for close at 1733428433172Writing region close event to WAL at 1733428433173 (+1 ms)Running coprocessor post-close hooks at 1733428433184 (+11 ms)Closed at 1733428433184 2024-12-05T19:53:53,184 DEBUG [RS_CLOSE_REGION-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54. 2024-12-05T19:53:53,184 DEBUG [RS:0;86162e2766a8:40645 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/oldWALs 2024-12-05T19:53:53,184 INFO [RS:0;86162e2766a8:40645 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 86162e2766a8%2C40645%2C1733428430982:(num 1733428431495) 2024-12-05T19:53:53,184 DEBUG [RS:0;86162e2766a8:40645 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:53,185 INFO [RS:0;86162e2766a8:40645 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T19:53:53,185 INFO [RS:0;86162e2766a8:40645 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T19:53:53,185 INFO [RS:0;86162e2766a8:40645 {}] hbase.ChoreService(370): Chore service for: regionserver/86162e2766a8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T19:53:53,185 INFO [RS:0;86162e2766a8:40645 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T19:53:53,185 INFO [RS:0;86162e2766a8:40645 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T19:53:53,185 INFO [regionserver/86162e2766a8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T19:53:53,185 INFO [RS:0;86162e2766a8:40645 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-05T19:53:53,185 INFO [RS:0;86162e2766a8:40645 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T19:53:53,185 INFO [RS:0;86162e2766a8:40645 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40645 2024-12-05T19:53:53,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/86162e2766a8,40645,1733428430982 2024-12-05T19:53:53,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T19:53:53,187 INFO [RS:0;86162e2766a8:40645 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T19:53:53,188 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [86162e2766a8,40645,1733428430982] 2024-12-05T19:53:53,190 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/86162e2766a8,40645,1733428430982 already deleted, retry=false 2024-12-05T19:53:53,190 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 86162e2766a8,40645,1733428430982 expired; onlineServers=2 2024-12-05T19:53:53,198 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/.tmp/info/e48b273aa9e34fd6a2c3c58f764e6d45 is 153, key is TestHBaseWalOnEC,,1733428432179.0e62de7f5356c8f81409a410063cbc54./info:regioninfo/1733428432579/Put/seqid=0 2024-12-05T19:53:53,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741840_1016 (size=6637) 2024-12-05T19:53:53,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741840_1016 (size=6637) 2024-12-05T19:53:53,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741840_1016 (size=6637) 2024-12-05T19:53:53,207 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/.tmp/info/e48b273aa9e34fd6a2c3c58f764e6d45 2024-12-05T19:53:53,231 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/.tmp/ns/9c2b751a94f34277b6cd2b91c26aa4d4 is 43, key is default/ns:d/1733428432132/Put/seqid=0 2024-12-05T19:53:53,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741841_1017 (size=5153) 2024-12-05T19:53:53,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741841_1017 (size=5153) 2024-12-05T19:53:53,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44737 is added to blk_1073741841_1017 (size=5153) 2024-12-05T19:53:53,239 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/.tmp/ns/9c2b751a94f34277b6cd2b91c26aa4d4 2024-12-05T19:53:53,241 INFO [regionserver/86162e2766a8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T19:53:53,241 INFO [regionserver/86162e2766a8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T19:53:53,241 INFO [regionserver/86162e2766a8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T19:53:53,271 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/.tmp/table/6af6181c74bf4a328efd1f9197654db1 is 52, key is TestHBaseWalOnEC/table:state/1733428432595/Put/seqid=0 2024-12-05T19:53:53,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741842_1018 (size=5249) 2024-12-05T19:53:53,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741842_1018 (size=5249) 2024-12-05T19:53:53,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741842_1018 (size=5249) 2024-12-05T19:53:53,280 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/.tmp/table/6af6181c74bf4a328efd1f9197654db1 2024-12-05T19:53:53,289 INFO [RS:0;86162e2766a8:40645 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T19:53:53,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:53,289 INFO [RS:0;86162e2766a8:40645 {}] regionserver.HRegionServer(1031): Exiting; stopping=86162e2766a8,40645,1733428430982; zookeeper connection closed. 
2024-12-05T19:53:53,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40645-0x10063be81290001, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:53,290 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1964532f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1964532f 2024-12-05T19:53:53,290 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/.tmp/info/e48b273aa9e34fd6a2c3c58f764e6d45 as hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/info/e48b273aa9e34fd6a2c3c58f764e6d45 2024-12-05T19:53:53,297 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/info/e48b273aa9e34fd6a2c3c58f764e6d45, entries=10, sequenceid=11, filesize=6.5 K 2024-12-05T19:53:53,299 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/.tmp/ns/9c2b751a94f34277b6cd2b91c26aa4d4 as hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/ns/9c2b751a94f34277b6cd2b91c26aa4d4 2024-12-05T19:53:53,306 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/ns/9c2b751a94f34277b6cd2b91c26aa4d4, entries=2, sequenceid=11, filesize=5.0 K 2024-12-05T19:53:53,308 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/.tmp/table/6af6181c74bf4a328efd1f9197654db1 as hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/table/6af6181c74bf4a328efd1f9197654db1 2024-12-05T19:53:53,315 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/table/6af6181c74bf4a328efd1f9197654db1, entries=2, sequenceid=11, filesize=5.1 K 2024-12-05T19:53:53,317 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 147ms, sequenceid=11, compaction requested=false 2024-12-05T19:53:53,324 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-05T19:53:53,324 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] 
coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T19:53:53,325 INFO [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T19:53:53,325 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733428433170Running coprocessor pre-close hooks at 1733428433170Disabling compacts and flushes for region at 1733428433170Disabling writes for close at 1733428433170Obtaining lock to block concurrent updates at 1733428433175 (+5 ms)Preparing flush snapshotting stores in 1588230740 at 1733428433175Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733428433176 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733428433177 (+1 ms)Flushing 1588230740/info: creating writer at 1733428433177Flushing 1588230740/info: appending metadata at 1733428433198 (+21 ms)Flushing 1588230740/info: closing flushed file at 1733428433198Flushing 1588230740/ns: creating writer at 1733428433214 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733428433230 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733428433230Flushing 1588230740/table: creating writer at 1733428433247 (+17 ms)Flushing 1588230740/table: appending metadata at 1733428433270 (+23 ms)Flushing 1588230740/table: closing flushed file at 1733428433270Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a6f1f0: reopening flushed file at 1733428433288 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b6fda6c: reopening flushed file at 1733428433298 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3575d0a6: reopening flushed file at 1733428433307 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 147ms, sequenceid=11, compaction requested=false at 1733428433317 (+10 ms)Writing region close event to WAL at 1733428433318 (+1 ms)Running coprocessor post-close hooks at 1733428433324 (+6 ms)Closed at 1733428433324 2024-12-05T19:53:53,325 DEBUG [RS_CLOSE_META-regionserver/86162e2766a8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T19:53:53,338 INFO [regionserver/86162e2766a8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T19:53:53,338 INFO [regionserver/86162e2766a8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-05T19:53:53,338 INFO [regionserver/86162e2766a8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-05T19:53:53,338 INFO [regionserver/86162e2766a8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T19:53:53,370 INFO [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(976): stopping server 86162e2766a8,39485,1733428431042; all regions closed. 
2024-12-05T19:53:53,371 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,371 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,371 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,371 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,371 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741836_1012 (size=2751) 2024-12-05T19:53:53,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741836_1012 (size=2751) 2024-12-05T19:53:53,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741836_1012 (size=2751) 2024-12-05T19:53:53,375 INFO [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(976): stopping server 86162e2766a8,40097,1733428431012; all regions closed. 2024-12-05T19:53:53,376 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,376 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,377 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,377 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,377 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741834_1010 (size=1298) 2024-12-05T19:53:53,382 DEBUG [RS:2;86162e2766a8:39485 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/oldWALs 2024-12-05T19:53:53,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741834_1010 (size=1298) 2024-12-05T19:53:53,382 INFO [RS:2;86162e2766a8:39485 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 86162e2766a8%2C39485%2C1733428431042.meta:.meta(num 1733428432064) 2024-12-05T19:53:53,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741834_1010 (size=1298) 2024-12-05T19:53:53,383 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,383 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,383 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,384 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,384 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,386 DEBUG [RS:1;86162e2766a8:40097 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/oldWALs 2024-12-05T19:53:53,386 INFO [RS:1;86162e2766a8:40097 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 86162e2766a8%2C40097%2C1733428431012:(num 1733428431495) 2024-12-05T19:53:53,386 DEBUG [RS:1;86162e2766a8:40097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:53,386 INFO [RS:1;86162e2766a8:40097 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T19:53:53,386 INFO [RS:1;86162e2766a8:40097 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T19:53:53,387 INFO [RS:1;86162e2766a8:40097 {}] hbase.ChoreService(370): Chore service for: 
regionserver/86162e2766a8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T19:53:53,387 INFO [RS:1;86162e2766a8:40097 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T19:53:53,387 INFO [RS:1;86162e2766a8:40097 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T19:53:53,387 INFO [RS:1;86162e2766a8:40097 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T19:53:53,387 INFO [RS:1;86162e2766a8:40097 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T19:53:53,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741833_1009 (size=93) 2024-12-05T19:53:53,387 INFO [RS:1;86162e2766a8:40097 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40097 2024-12-05T19:53:53,387 INFO [regionserver/86162e2766a8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T19:53:53,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741833_1009 (size=93) 2024-12-05T19:53:53,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741833_1009 (size=93) 2024-12-05T19:53:53,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T19:53:53,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/86162e2766a8,40097,1733428431012 2024-12-05T19:53:53,391 INFO [RS:1;86162e2766a8:40097 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T19:53:53,392 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [86162e2766a8,40097,1733428431012] 2024-12-05T19:53:53,393 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/86162e2766a8,40097,1733428431012 already deleted, retry=false 2024-12-05T19:53:53,393 DEBUG [RS:2;86162e2766a8:39485 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/oldWALs 2024-12-05T19:53:53,393 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 86162e2766a8,40097,1733428431012 expired; onlineServers=1 2024-12-05T19:53:53,394 INFO [RS:2;86162e2766a8:39485 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 86162e2766a8%2C39485%2C1733428431042:(num 1733428431479) 2024-12-05T19:53:53,394 DEBUG [RS:2;86162e2766a8:39485 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T19:53:53,394 INFO [RS:2;86162e2766a8:39485 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T19:53:53,394 INFO [RS:2;86162e2766a8:39485 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T19:53:53,394 INFO [RS:2;86162e2766a8:39485 {}] hbase.ChoreService(370): Chore service for: regionserver/86162e2766a8:0 had [ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T19:53:53,394 INFO [RS:2;86162e2766a8:39485 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T19:53:53,394 INFO [regionserver/86162e2766a8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T19:53:53,394 INFO [RS:2;86162e2766a8:39485 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39485 2024-12-05T19:53:53,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/86162e2766a8,39485,1733428431042 2024-12-05T19:53:53,397 INFO [RS:2;86162e2766a8:39485 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T19:53:53,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T19:53:53,398 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [86162e2766a8,39485,1733428431042] 2024-12-05T19:53:53,399 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/86162e2766a8,39485,1733428431042 already deleted, retry=false 2024-12-05T19:53:53,400 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 86162e2766a8,39485,1733428431042 expired; onlineServers=0 2024-12-05T19:53:53,400 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '86162e2766a8,46269,1733428430935' ***** 2024-12-05T19:53:53,400 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T19:53:53,400 INFO [M:0;86162e2766a8:46269 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T19:53:53,400 INFO [M:0;86162e2766a8:46269 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T19:53:53,400 DEBUG [M:0;86162e2766a8:46269 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T19:53:53,400 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-05T19:53:53,400 DEBUG [M:0;86162e2766a8:46269 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T19:53:53,400 DEBUG [master/86162e2766a8:0:becomeActiveMaster-HFileCleaner.large.0-1733428431268 {}] cleaner.HFileCleaner(306): Exit Thread[master/86162e2766a8:0:becomeActiveMaster-HFileCleaner.large.0-1733428431268,5,FailOnTimeoutGroup] 2024-12-05T19:53:53,400 DEBUG [master/86162e2766a8:0:becomeActiveMaster-HFileCleaner.small.0-1733428431270 {}] cleaner.HFileCleaner(306): Exit Thread[master/86162e2766a8:0:becomeActiveMaster-HFileCleaner.small.0-1733428431270,5,FailOnTimeoutGroup] 2024-12-05T19:53:53,400 INFO [M:0;86162e2766a8:46269 {}] hbase.ChoreService(370): Chore service for: master/86162e2766a8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T19:53:53,400 INFO [M:0;86162e2766a8:46269 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T19:53:53,400 DEBUG [M:0;86162e2766a8:46269 {}] master.HMaster(1795): Stopping service threads 2024-12-05T19:53:53,401 INFO [M:0;86162e2766a8:46269 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T19:53:53,401 INFO [M:0;86162e2766a8:46269 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T19:53:53,401 INFO [M:0;86162e2766a8:46269 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T19:53:53,401 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T19:53:53,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T19:53:53,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T19:53:53,402 DEBUG [M:0;86162e2766a8:46269 {}] zookeeper.ZKUtil(347): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T19:53:53,402 WARN [M:0;86162e2766a8:46269 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T19:53:53,403 INFO [M:0;86162e2766a8:46269 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/.lastflushedseqids 2024-12-05T19:53:53,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741843_1019 (size=127) 2024-12-05T19:53:53,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741843_1019 (size=127) 2024-12-05T19:53:53,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741843_1019 (size=127) 2024-12-05T19:53:53,418 INFO [M:0;86162e2766a8:46269 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T19:53:53,418 INFO [M:0;86162e2766a8:46269 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T19:53:53,419 DEBUG [M:0;86162e2766a8:46269 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T19:53:53,419 INFO [M:0;86162e2766a8:46269 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T19:53:53,419 DEBUG [M:0;86162e2766a8:46269 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T19:53:53,419 DEBUG [M:0;86162e2766a8:46269 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T19:53:53,419 DEBUG [M:0;86162e2766a8:46269 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T19:53:53,419 INFO [M:0;86162e2766a8:46269 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-05T19:53:53,446 DEBUG [M:0;86162e2766a8:46269 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f5057cef11a04251816802c9513fdcc0 is 82, key is hbase:meta,,1/info:regioninfo/1733428432112/Put/seqid=0 2024-12-05T19:53:53,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741844_1020 (size=5672) 2024-12-05T19:53:53,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741844_1020 (size=5672) 2024-12-05T19:53:53,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741844_1020 (size=5672) 2024-12-05T19:53:53,456 INFO [M:0;86162e2766a8:46269 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f5057cef11a04251816802c9513fdcc0 2024-12-05T19:53:53,490 DEBUG [M:0;86162e2766a8:46269 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7f54bfb6f0ef4193b9eb4f3fc00000a9 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733428432603/Put/seqid=0 2024-12-05T19:53:53,492 WARN [IPC Server handler 2 on default port 42213 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-05T19:53:53,492 WARN [IPC Server handler 2 on default port 42213 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, 
storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-05T19:53:53,492 WARN [IPC Server handler 2 on default port 42213 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-05T19:53:53,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:53,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40097-0x10063be81290002, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:53,493 INFO [RS:1;86162e2766a8:40097 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T19:53:53,493 INFO [RS:1;86162e2766a8:40097 {}] regionserver.HRegionServer(1031): Exiting; stopping=86162e2766a8,40097,1733428431012; zookeeper connection closed. 2024-12-05T19:53:53,493 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@68f7bf1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@68f7bf1 2024-12-05T19:53:53,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741845_1021 (size=6438) 2024-12-05T19:53:53,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:53,499 INFO [RS:2;86162e2766a8:39485 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T19:53:53,499 INFO [RS:2;86162e2766a8:39485 {}] regionserver.HRegionServer(1031): Exiting; stopping=86162e2766a8,39485,1733428431042; zookeeper connection closed. 
2024-12-05T19:53:53,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741845_1021 (size=6438) 2024-12-05T19:53:53,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39485-0x10063be81290003, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T19:53:53,500 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6b511a08 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6b511a08 2024-12-05T19:53:53,500 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-05T19:53:53,500 INFO [M:0;86162e2766a8:46269 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7f54bfb6f0ef4193b9eb4f3fc00000a9 2024-12-05T19:53:53,524 DEBUG [M:0;86162e2766a8:46269 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e3dbbeb089f74beb95a822fc8779445c is 69, key is 86162e2766a8,39485,1733428431042/rs:state/1733428431299/Put/seqid=0 2024-12-05T19:53:53,526 WARN [IPC Server handler 4 on default port 42213 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-05T19:53:53,526 WARN [IPC Server handler 4 on default port 42213 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-05T19:53:53,526 WARN [IPC Server handler 4 on default port 42213 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-05T19:53:53,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741846_1022 (size=5294) 2024-12-05T19:53:53,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741846_1022 (size=5294) 2024-12-05T19:53:53,533 INFO [M:0;86162e2766a8:46269 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), 
to=hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e3dbbeb089f74beb95a822fc8779445c 2024-12-05T19:53:53,541 DEBUG [M:0;86162e2766a8:46269 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f5057cef11a04251816802c9513fdcc0 as hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f5057cef11a04251816802c9513fdcc0 2024-12-05T19:53:53,548 INFO [M:0;86162e2766a8:46269 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f5057cef11a04251816802c9513fdcc0, entries=8, sequenceid=72, filesize=5.5 K 2024-12-05T19:53:53,549 DEBUG [M:0;86162e2766a8:46269 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7f54bfb6f0ef4193b9eb4f3fc00000a9 as hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7f54bfb6f0ef4193b9eb4f3fc00000a9 2024-12-05T19:53:53,556 INFO [M:0;86162e2766a8:46269 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7f54bfb6f0ef4193b9eb4f3fc00000a9, entries=8, sequenceid=72, filesize=6.3 K 2024-12-05T19:53:53,557 DEBUG [M:0;86162e2766a8:46269 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e3dbbeb089f74beb95a822fc8779445c as hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e3dbbeb089f74beb95a822fc8779445c 2024-12-05T19:53:53,565 INFO [M:0;86162e2766a8:46269 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42213/user/jenkins/test-data/539cdc5c-0834-9c01-3d36-d1b0b68d5527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e3dbbeb089f74beb95a822fc8779445c, entries=3, sequenceid=72, filesize=5.2 K 2024-12-05T19:53:53,567 INFO [M:0;86162e2766a8:46269 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=72, compaction requested=false 2024-12-05T19:53:53,579 INFO [M:0;86162e2766a8:46269 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T19:53:53,580 DEBUG [M:0;86162e2766a8:46269 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733428433419Disabling compacts and flushes for region at 1733428433419Disabling writes for close at 1733428433419Obtaining lock to block concurrent updates at 1733428433419Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733428433419Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27462, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733428433420 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733428433421 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733428433421Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733428433445 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733428433445Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733428433465 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733428433489 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733428433489Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733428433507 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733428433524 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733428433524Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9c1e001: reopening flushed file at 1733428433540 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6be35149: reopening flushed file at 1733428433549 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d19f761: reopening flushed file at 1733428433556 (+7 ms)Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=72, compaction requested=false at 1733428433567 (+11 ms)Writing region close event to WAL at 1733428433579 (+12 ms)Closed at 1733428433579 2024-12-05T19:53:53,587 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,587 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,587 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,587 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,587 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T19:53:53,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44737 is added to blk_1073741830_1006 (size=32665) 2024-12-05T19:53:53,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44597 is added to blk_1073741830_1006 (size=32665) 2024-12-05T19:53:53,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38381 is added to blk_1073741830_1006 (size=32665) 2024-12-05T19:53:53,600 INFO [M:0;86162e2766a8:46269 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-05T19:53:53,600 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T19:53:53,600 INFO [M:0;86162e2766a8:46269 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46269
2024-12-05T19:53:53,600 INFO [M:0;86162e2766a8:46269 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-05T19:53:53,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T19:53:53,702 INFO [M:0;86162e2766a8:46269 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-05T19:53:53,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46269-0x10063be81290000, quorum=127.0.0.1:64776, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T19:53:53,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f50f857{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T19:53:53,705 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7eeef71e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T19:53:53,705 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T19:53:53,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40b03519{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T19:53:53,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44968fad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/hadoop.log.dir/,STOPPED}
2024-12-05T19:53:53,707 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T19:53:53,707 WARN [BP-549980394-172.17.0.2-1733428429944 heartbeating to localhost/127.0.0.1:42213 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T19:53:53,707 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T19:53:53,707 WARN [BP-549980394-172.17.0.2-1733428429944 heartbeating to localhost/127.0.0.1:42213 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-549980394-172.17.0.2-1733428429944 (Datanode Uuid faccaf56-b3b3-4f38-b0da-b058fe16cdd2) service to localhost/127.0.0.1:42213
2024-12-05T19:53:53,708 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/data/data5/current/BP-549980394-172.17.0.2-1733428429944 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T19:53:53,708 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/data/data6/current/BP-549980394-172.17.0.2-1733428429944 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T19:53:53,708 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T19:53:53,710 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@46f4cd0a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T19:53:53,710 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@700b2317{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T19:53:53,710 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T19:53:53,711 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55cf3a01{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T19:53:53,711 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b4148d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/hadoop.log.dir/,STOPPED}
2024-12-05T19:53:53,712 WARN [BP-549980394-172.17.0.2-1733428429944 heartbeating to localhost/127.0.0.1:42213 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T19:53:53,712 WARN [BP-549980394-172.17.0.2-1733428429944 heartbeating to localhost/127.0.0.1:42213 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-549980394-172.17.0.2-1733428429944 (Datanode Uuid f4242a26-5320-42bc-9770-968079fa456b) service to localhost/127.0.0.1:42213
2024-12-05T19:53:53,713 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/data/data3/current/BP-549980394-172.17.0.2-1733428429944 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T19:53:53,713 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/data/data4/current/BP-549980394-172.17.0.2-1733428429944 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T19:53:53,713 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T19:53:53,713 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T19:53:53,713 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T19:53:53,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18f854cf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T19:53:53,716 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70fdfe33{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T19:53:53,716 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T19:53:53,716 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72f96008{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T19:53:53,716 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c4ebd49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/hadoop.log.dir/,STOPPED}
2024-12-05T19:53:53,717 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T19:53:53,717 WARN [BP-549980394-172.17.0.2-1733428429944 heartbeating to localhost/127.0.0.1:42213 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T19:53:53,717 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T19:53:53,717 WARN [BP-549980394-172.17.0.2-1733428429944 heartbeating to localhost/127.0.0.1:42213 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-549980394-172.17.0.2-1733428429944 (Datanode Uuid f5d09ef1-9248-4d9c-8501-7c71e629f496) service to localhost/127.0.0.1:42213
2024-12-05T19:53:53,718 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/data/data1/current/BP-549980394-172.17.0.2-1733428429944 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T19:53:53,718 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/cluster_47eab1e7-b1d9-b3be-804c-321e18c13f11/data/data2/current/BP-549980394-172.17.0.2-1733428429944 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T19:53:53,719 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T19:53:53,727 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@15027254{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-05T19:53:53,728 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4293887f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T19:53:53,728 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T19:53:53,728 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e58533{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T19:53:53,728 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ad8d9de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/925e084a-3afc-12ce-5cee-f895408b7226/hadoop.log.dir/,STOPPED}
2024-12-05T19:53:53,736 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-05T19:53:53,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-05T19:53:53,778 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=148 (was 88) - Thread LEAK? -, OpenFileDescriptor=521 (was 441) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=287 (was 287), ProcessCount=11 (was 11), AvailableMemoryMB=8423 (was 8614)
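For context only: teardown sequences like the one above (NettyRpcServer stop, ZooKeeper close, Jetty handlers stopping, datanode block-pool services ending, then "Minicluster is down" and the ResourceChecker report) are typically produced by the minicluster lifecycle of an HBase test. The sketch below is not the actual TestHBaseWalOnEC source; it is a minimal, hedged illustration that assumes HBaseTestingUtil exposes startMiniCluster(int)/shutdownMiniCluster() in the same way its predecessor HBaseTestingUtility did, and the class name MiniClusterLifecycleSketch is hypothetical.

```java
// Hypothetical sketch of the test lifecycle that produces the shutdown log above.
// Assumption: HBaseTestingUtil provides startMiniCluster(int) and shutdownMiniCluster(),
// mirroring the older HBaseTestingUtility API; this is NOT the real TestHBaseWalOnEC.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleSketch {

  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Starts HDFS datanodes, a MiniZooKeeperCluster, and an HBase master/regionserver.
    UTIL.startMiniCluster(3);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Emits the kind of shutdown logging shown above, ending with
    // "Shutdown MiniZK cluster with all ZK servers" and "Minicluster is down".
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void testReadWrite() throws Exception {
    // Test body elided; the log excerpt only covers cluster teardown.
  }
}
```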