2024-12-04 15:52:14,006 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-04 15:52:14,018 main DEBUG Took 0.009915 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-04 15:52:14,018 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-04 15:52:14,018 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-04 15:52:14,019 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-04 15:52:14,020 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:52:14,036 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-04 15:52:14,047 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,049 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:52:14,049 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,050 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:52:14,050 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,050 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:52:14,051 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,051 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:52:14,052 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,052 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:52:14,053 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,053 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:52:14,054 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,054 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-04 15:52:14,055 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,055 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:52:14,055 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,055 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:52:14,056 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,056 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:52:14,057 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,057 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:52:14,057 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,058 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 15:52:14,058 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,058 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-04 15:52:14,060 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 15:52:14,061 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-04 15:52:14,062 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-04 15:52:14,063 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-04 15:52:14,064 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-04 15:52:14,064 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-04 15:52:14,073 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-04 15:52:14,075 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-04 15:52:14,077 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-04 15:52:14,077 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-04 15:52:14,077 main DEBUG createAppenders(={Console}) 2024-12-04 15:52:14,078 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-04 15:52:14,078 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-04 15:52:14,078 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-04 15:52:14,079 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-04 15:52:14,079 main DEBUG OutputStream closed 2024-12-04 15:52:14,079 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-04 15:52:14,079 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-04 15:52:14,080 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-04 15:52:14,146 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-04 15:52:14,148 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-04 15:52:14,149 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-04 15:52:14,150 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-04 15:52:14,150 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-04 15:52:14,151 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-04 15:52:14,151 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-04 15:52:14,151 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-04 15:52:14,152 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-04 15:52:14,152 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-04 15:52:14,152 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-04 15:52:14,152 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-04 15:52:14,153 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-04 15:52:14,153 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-04 15:52:14,153 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-04 15:52:14,154 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-04 15:52:14,154 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-04 15:52:14,155 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-04 15:52:14,157 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-04 15:52:14,157 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-04 15:52:14,158 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-04 15:52:14,158 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-04T15:52:14,175 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-04 15:52:14,178 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-04 15:52:14,178 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-04T15:52:14,416 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a 2024-12-04T15:52:14,440 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66, deleteOnExit=true 2024-12-04T15:52:14,441 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/test.cache.data in system properties and HBase conf 2024-12-04T15:52:14,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T15:52:14,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/hadoop.log.dir in system properties and HBase conf 2024-12-04T15:52:14,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T15:52:14,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T15:52:14,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T15:52:14,532 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-04T15:52:14,620 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-04T15:52:14,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T15:52:14,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T15:52:14,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T15:52:14,625 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T15:52:14,625 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T15:52:14,626 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T15:52:14,626 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T15:52:14,626 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T15:52:14,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T15:52:14,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/nfs.dump.dir in system properties and HBase conf 2024-12-04T15:52:14,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/java.io.tmpdir in system properties and HBase conf 2024-12-04T15:52:14,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T15:52:14,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T15:52:14,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T15:52:15,437 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-04T15:52:15,514 INFO [Time-limited test {}] log.Log(170): Logging initialized @2216ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-04T15:52:15,591 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T15:52:15,652 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T15:52:15,673 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T15:52:15,673 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T15:52:15,674 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T15:52:15,687 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T15:52:15,689 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27f57d6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/hadoop.log.dir/,AVAILABLE} 2024-12-04T15:52:15,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@410292bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T15:52:15,889 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ea2dca6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/java.io.tmpdir/jetty-localhost-38769-hadoop-hdfs-3_4_1-tests_jar-_-any-7353358728322953036/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T15:52:15,896 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ed9b238{HTTP/1.1, (http/1.1)}{localhost:38769} 2024-12-04T15:52:15,896 INFO [Time-limited test {}] server.Server(415): Started @2599ms 2024-12-04T15:52:16,306 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T15:52:16,312 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T15:52:16,313 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T15:52:16,314 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T15:52:16,314 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T15:52:16,315 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d0819de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/hadoop.log.dir/,AVAILABLE} 2024-12-04T15:52:16,315 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54f91ad6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T15:52:16,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d1a7cf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/java.io.tmpdir/jetty-localhost-34765-hadoop-hdfs-3_4_1-tests_jar-_-any-2103222281051928136/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T15:52:16,437 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@20b70ca3{HTTP/1.1, (http/1.1)}{localhost:34765} 2024-12-04T15:52:16,437 INFO [Time-limited test {}] server.Server(415): Started @3140ms 2024-12-04T15:52:16,492 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T15:52:16,612 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T15:52:16,617 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T15:52:16,618 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T15:52:16,619 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T15:52:16,619 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T15:52:16,620 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37e44dc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/hadoop.log.dir/,AVAILABLE} 2024-12-04T15:52:16,621 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fb4f3a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T15:52:16,762 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1548acd1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/java.io.tmpdir/jetty-localhost-42149-hadoop-hdfs-3_4_1-tests_jar-_-any-28013028509647244/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T15:52:16,763 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3dc20694{HTTP/1.1, (http/1.1)}{localhost:42149} 2024-12-04T15:52:16,763 INFO [Time-limited test {}] server.Server(415): Started @3466ms 2024-12-04T15:52:16,765 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T15:52:16,802 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T15:52:16,806 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T15:52:16,809 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T15:52:16,809 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T15:52:16,810 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T15:52:16,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@656f7043{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/hadoop.log.dir/,AVAILABLE} 2024-12-04T15:52:16,811 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17c0da3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T15:52:16,944 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3297a183{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/java.io.tmpdir/jetty-localhost-40753-hadoop-hdfs-3_4_1-tests_jar-_-any-14402139354164122203/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T15:52:16,945 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b8a83a2{HTTP/1.1, (http/1.1)}{localhost:40753} 2024-12-04T15:52:16,946 INFO [Time-limited test {}] server.Server(415): Started @3649ms 2024-12-04T15:52:16,951 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-04T15:52:16,955 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/data/data1/current/BP-818689816-172.17.0.2-1733327535191/current, will proceed with Du for space computation calculation, 2024-12-04T15:52:16,955 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/data/data2/current/BP-818689816-172.17.0.2-1733327535191/current, will proceed with Du for space computation calculation, 2024-12-04T15:52:16,955 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/data/data4/current/BP-818689816-172.17.0.2-1733327535191/current, will proceed with Du for space computation calculation, 2024-12-04T15:52:16,955 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/data/data3/current/BP-818689816-172.17.0.2-1733327535191/current, will proceed with Du for space computation calculation, 2024-12-04T15:52:17,017 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T15:52:17,019 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T15:52:17,103 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55c902a21e439 with lease ID 0x24a564835c83445d: Processing first storage report for DS-6e325b22-77ed-4b3a-bc7d-5936710c2006 from datanode DatanodeRegistration(127.0.0.1:41925, datanodeUuid=8731965f-42c8-41c6-be10-91d3c09948c5, infoPort=37259, infoSecurePort=0, ipcPort=33159, storageInfo=lv=-57;cid=testClusterID;nsid=157991918;c=1733327535191) 2024-12-04T15:52:17,104 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55c902a21e439 with lease ID 0x24a564835c83445d: from storage DS-6e325b22-77ed-4b3a-bc7d-5936710c2006 node DatanodeRegistration(127.0.0.1:41925, datanodeUuid=8731965f-42c8-41c6-be10-91d3c09948c5, infoPort=37259, infoSecurePort=0, ipcPort=33159, storageInfo=lv=-57;cid=testClusterID;nsid=157991918;c=1733327535191), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-04T15:52:17,105 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc9a179c2d1dfa7f3 with lease ID 0x24a564835c83445e: Processing first storage report for DS-effbfd14-6aeb-4d03-9115-05142749f245 from datanode DatanodeRegistration(127.0.0.1:33405, datanodeUuid=7ad2d57b-5f62-4e42-9464-cd1f2452152c, infoPort=40533, infoSecurePort=0, ipcPort=45217, storageInfo=lv=-57;cid=testClusterID;nsid=157991918;c=1733327535191) 2024-12-04T15:52:17,105 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc9a179c2d1dfa7f3 with lease ID 0x24a564835c83445e: from storage DS-effbfd14-6aeb-4d03-9115-05142749f245 node DatanodeRegistration(127.0.0.1:33405, datanodeUuid=7ad2d57b-5f62-4e42-9464-cd1f2452152c, infoPort=40533, infoSecurePort=0, ipcPort=45217, storageInfo=lv=-57;cid=testClusterID;nsid=157991918;c=1733327535191), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T15:52:17,106 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55c902a21e439 with lease ID 0x24a564835c83445d: Processing first storage report for DS-615cd7e3-4991-4b08-8a6c-5ef4977edbc0 from datanode DatanodeRegistration(127.0.0.1:41925, datanodeUuid=8731965f-42c8-41c6-be10-91d3c09948c5, infoPort=37259, infoSecurePort=0, ipcPort=33159, storageInfo=lv=-57;cid=testClusterID;nsid=157991918;c=1733327535191) 2024-12-04T15:52:17,106 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55c902a21e439 with lease ID 0x24a564835c83445d: from storage DS-615cd7e3-4991-4b08-8a6c-5ef4977edbc0 node DatanodeRegistration(127.0.0.1:41925, datanodeUuid=8731965f-42c8-41c6-be10-91d3c09948c5, infoPort=37259, infoSecurePort=0, ipcPort=33159, storageInfo=lv=-57;cid=testClusterID;nsid=157991918;c=1733327535191), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T15:52:17,106 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc9a179c2d1dfa7f3 with lease ID 0x24a564835c83445e: Processing first storage report for DS-a4a2b354-9b64-4502-8688-f11add358f47 from datanode DatanodeRegistration(127.0.0.1:33405, datanodeUuid=7ad2d57b-5f62-4e42-9464-cd1f2452152c, infoPort=40533, infoSecurePort=0, ipcPort=45217, storageInfo=lv=-57;cid=testClusterID;nsid=157991918;c=1733327535191) 2024-12-04T15:52:17,106 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc9a179c2d1dfa7f3 with 
lease ID 0x24a564835c83445e: from storage DS-a4a2b354-9b64-4502-8688-f11add358f47 node DatanodeRegistration(127.0.0.1:33405, datanodeUuid=7ad2d57b-5f62-4e42-9464-cd1f2452152c, infoPort=40533, infoSecurePort=0, ipcPort=45217, storageInfo=lv=-57;cid=testClusterID;nsid=157991918;c=1733327535191), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T15:52:17,117 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/data/data5/current/BP-818689816-172.17.0.2-1733327535191/current, will proceed with Du for space computation calculation, 2024-12-04T15:52:17,121 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/data/data6/current/BP-818689816-172.17.0.2-1733327535191/current, will proceed with Du for space computation calculation, 2024-12-04T15:52:17,153 WARN [Thread-119 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T15:52:17,160 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6602ad64fca5c63f with lease ID 0x24a564835c83445f: Processing first storage report for DS-52a8bfb8-612f-49d2-ab50-041a00986f87 from datanode DatanodeRegistration(127.0.0.1:45289, datanodeUuid=2051eead-5a6f-4a1d-80be-5deabe2a7a00, infoPort=38711, infoSecurePort=0, ipcPort=35583, storageInfo=lv=-57;cid=testClusterID;nsid=157991918;c=1733327535191) 2024-12-04T15:52:17,161 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6602ad64fca5c63f with lease ID 0x24a564835c83445f: from storage DS-52a8bfb8-612f-49d2-ab50-041a00986f87 node DatanodeRegistration(127.0.0.1:45289, datanodeUuid=2051eead-5a6f-4a1d-80be-5deabe2a7a00, infoPort=38711, infoSecurePort=0, ipcPort=35583, storageInfo=lv=-57;cid=testClusterID;nsid=157991918;c=1733327535191), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-04T15:52:17,161 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6602ad64fca5c63f with lease ID 0x24a564835c83445f: Processing first storage report for DS-8a898328-baa6-4f91-8499-9ba28c3b147b from datanode DatanodeRegistration(127.0.0.1:45289, datanodeUuid=2051eead-5a6f-4a1d-80be-5deabe2a7a00, infoPort=38711, infoSecurePort=0, ipcPort=35583, storageInfo=lv=-57;cid=testClusterID;nsid=157991918;c=1733327535191) 2024-12-04T15:52:17,162 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6602ad64fca5c63f with lease ID 0x24a564835c83445f: from storage DS-8a898328-baa6-4f91-8499-9ba28c3b147b node DatanodeRegistration(127.0.0.1:45289, datanodeUuid=2051eead-5a6f-4a1d-80be-5deabe2a7a00, infoPort=38711, infoSecurePort=0, ipcPort=35583, storageInfo=lv=-57;cid=testClusterID;nsid=157991918;c=1733327535191), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T15:52:17,345 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a 
2024-12-04T15:52:17,418 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable 2024-12-04T15:52:17,474 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=160, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=162, ProcessCount=11, AvailableMemoryMB=9914 2024-12-04T15:52:17,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T15:52:17,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-04T15:52:17,560 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/zookeeper_0, clientPort=64574, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T15:52:17,570 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64574 2024-12-04T15:52:17,580 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:17,582 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:17,688 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:17,688 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:17,748 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-780551652_22 at /127.0.0.1:60534 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:33405:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60534 dst: /127.0.0.1:33405 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:17,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-04T15:52:18,167 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-04T15:52:18,177 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1 with version=8 2024-12-04T15:52:18,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/hbase-staging 2024-12-04T15:52:18,284 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-04T15:52:18,532 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a21b6491b371:0 server-side Connection retries=45 2024-12-04T15:52:18,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:18,543 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:18,547 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T15:52:18,547 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:18,548 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T15:52:18,694 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T15:52:18,758 INFO 
[Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-04T15:52:18,768 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-04T15:52:18,772 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T15:52:18,800 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 10507 (auto-detected) 2024-12-04T15:52:18,801 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-04T15:52:18,823 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46261 2024-12-04T15:52:18,845 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46261 connecting to ZooKeeper ensemble=127.0.0.1:64574 2024-12-04T15:52:18,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:462610x0, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T15:52:18,882 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46261-0x1017e2d30160000 connected 2024-12-04T15:52:18,909 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:18,912 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:18,923 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:18,928 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1, hbase.cluster.distributed=false 2024-12-04T15:52:18,954 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T15:52:18,958 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46261 2024-12-04T15:52:18,959 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46261 2024-12-04T15:52:18,959 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46261 2024-12-04T15:52:18,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46261 2024-12-04T15:52:18,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46261 2024-12-04T15:52:19,073 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a21b6491b371:0 server-side Connection retries=45 2024-12-04T15:52:19,075 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, 
maxQueueLength=30, handlerCount=3 2024-12-04T15:52:19,075 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:19,076 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T15:52:19,076 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:19,076 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T15:52:19,079 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T15:52:19,081 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T15:52:19,082 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43197 2024-12-04T15:52:19,084 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43197 connecting to ZooKeeper ensemble=127.0.0.1:64574 2024-12-04T15:52:19,085 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:19,091 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:19,099 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:431970x0, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T15:52:19,100 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43197-0x1017e2d30160001 connected 2024-12-04T15:52:19,100 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:19,105 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T15:52:19,112 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T15:52:19,115 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T15:52:19,120 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T15:52:19,120 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43197 2024-12-04T15:52:19,121 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43197 2024-12-04T15:52:19,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43197 2024-12-04T15:52:19,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43197 2024-12-04T15:52:19,123 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43197 2024-12-04T15:52:19,140 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a21b6491b371:0 server-side Connection retries=45 2024-12-04T15:52:19,140 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:19,140 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:19,141 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T15:52:19,141 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:19,141 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T15:52:19,141 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T15:52:19,142 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T15:52:19,142 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39883 2024-12-04T15:52:19,144 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39883 connecting to ZooKeeper ensemble=127.0.0.1:64574 2024-12-04T15:52:19,145 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:19,148 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:19,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:398830x0, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T15:52:19,155 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39883-0x1017e2d30160002 connected 2024-12-04T15:52:19,156 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 
2024-12-04T15:52:19,156 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T15:52:19,158 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T15:52:19,159 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T15:52:19,161 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T15:52:19,161 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39883 2024-12-04T15:52:19,162 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39883 2024-12-04T15:52:19,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39883 2024-12-04T15:52:19,168 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39883 2024-12-04T15:52:19,168 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39883 2024-12-04T15:52:19,185 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a21b6491b371:0 server-side Connection retries=45 2024-12-04T15:52:19,185 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:19,185 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:19,185 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T15:52:19,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:19,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T15:52:19,186 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T15:52:19,186 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T15:52:19,187 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35881 2024-12-04T15:52:19,188 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35881 connecting to ZooKeeper ensemble=127.0.0.1:64574 2024-12-04T15:52:19,190 INFO [Time-limited test {}] fs.HFileSystem(339): Added 
intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:19,192 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:19,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:358810x0, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T15:52:19,198 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35881-0x1017e2d30160003 connected 2024-12-04T15:52:19,198 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:19,199 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T15:52:19,200 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T15:52:19,201 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T15:52:19,203 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T15:52:19,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35881 2024-12-04T15:52:19,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35881 2024-12-04T15:52:19,206 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35881 2024-12-04T15:52:19,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35881 2024-12-04T15:52:19,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35881 2024-12-04T15:52:19,222 DEBUG [M:0;a21b6491b371:46261 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a21b6491b371:46261 2024-12-04T15:52:19,223 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a21b6491b371,46261,1733327538339 2024-12-04T15:52:19,231 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:19,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:19,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:19,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:19,234 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a21b6491b371,46261,1733327538339 2024-12-04T15:52:19,254 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T15:52:19,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T15:52:19,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T15:52:19,254 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,255 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T15:52:19,256 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a21b6491b371,46261,1733327538339 from backup master directory 2024-12-04T15:52:19,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a21b6491b371,46261,1733327538339 2024-12-04T15:52:19,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:19,260 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:19,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:19,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:19,261 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T15:52:19,261 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a21b6491b371,46261,1733327538339 2024-12-04T15:52:19,263 INFO [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-04T15:52:19,264 INFO [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-04T15:52:19,330 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/hbase.id] with ID: db5896f3-0241-4aba-9203-13a7561ae661 2024-12-04T15:52:19,330 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/.tmp/hbase.id 2024-12-04T15:52:19,337 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:19,337 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:19,340 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-780551652_22 at /127.0.0.1:38316 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:41925:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38316 dst: /127.0.0.1:41925 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:19,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41925 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-04T15:52:19,346 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-04T15:52:19,346 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/.tmp/hbase.id]:[hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/hbase.id] 2024-12-04T15:52:19,389 INFO [master/a21b6491b371:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:19,394 INFO [master/a21b6491b371:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T15:52:19,414 INFO [master/a21b6491b371:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-12-04T15:52:19,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,418 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,431 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:19,432 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:19,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-780551652_22 at /127.0.0.1:52108 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:45289:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52108 dst: /127.0.0.1:45289 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:19,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-04T15:52:19,440 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
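The repeated "Cannot allocate parity block" warnings and DataXceiver "Premature EOF" errors above are the usual symptom of writing under an RS-3-2-1024k erasure-coding policy on a cluster with fewer datanodes than the policy's stripe width (3 data + 2 parity units). Purely as a hedged sketch, not something this test performs, one way to compare a directory's EC policy against the live datanode count with the Hadoop 3 client API is shown below; the path is a placeholder, while the NameNode address matches the one in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Sketch: check whether an EC policy on a directory is wider than the
// number of live datanodes, and fall back to replication if so.
public class EcPolicyCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:34217"); // NameNode from the log
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    Path dir = new Path("/user/jenkins/test-data"); // placeholder path
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
    int liveDataNodes = dfs.getDataNodeStats().length;

    if (policy != null) {
      int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
      System.out.printf("policy=%s needs %d datanodes, cluster has %d%n",
          policy.getName(), needed, liveDataNodes);
      if (needed > liveDataNodes) {
        // Reverting to replicated storage avoids the parity-block warnings.
        dfs.unsetErasureCodingPolicy(dir);
      }
    }
  }
}
```

The `hdfs ec -verifyClusterSetup` command mentioned in the warning performs essentially the same check from the shell.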
2024-12-04T15:52:19,455 INFO [master/a21b6491b371:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:52:19,457 INFO [master/a21b6491b371:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T15:52:19,463 INFO [master/a21b6491b371:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-04T15:52:19,491 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:19,491 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:19,494 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-780551652_22 at /127.0.0.1:52126 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:45289:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52126 dst: /127.0.0.1:45289 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:19,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-04T15:52:19,501 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-04T15:52:19,519 INFO [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store 2024-12-04T15:52:19,535 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:19,535 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:19,538 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-780551652_22 at /127.0.0.1:60552 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33405:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60552 dst: /127.0.0.1:33405 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:19,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-04T15:52:19,543 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-04T15:52:19,548 INFO [master/a21b6491b371:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-04T15:52:19,551 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:52:19,552 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T15:52:19,553 INFO [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:52:19,553 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:52:19,555 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-04T15:52:19,555 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:52:19,555 INFO [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:52:19,556 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733327539552Disabling compacts and flushes for region at 1733327539552Disabling writes for close at 1733327539555 (+3 ms)Writing region close event to WAL at 1733327539555Closed at 1733327539555 2024-12-04T15:52:19,559 WARN [master/a21b6491b371:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/.initializing 2024-12-04T15:52:19,559 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/WALs/a21b6491b371,46261,1733327538339 2024-12-04T15:52:19,567 INFO [master/a21b6491b371:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-04T15:52:19,581 INFO [master/a21b6491b371:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a21b6491b371%2C46261%2C1733327538339, suffix=, logDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/WALs/a21b6491b371,46261,1733327538339, archiveDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/oldWALs, maxLogs=10 2024-12-04T15:52:19,612 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/WALs/a21b6491b371,46261,1733327538339/a21b6491b371%2C46261%2C1733327538339.1733327539585, exclude list is [], retry=0 2024-12-04T15:52:19,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:19,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41925,DS-6e325b22-77ed-4b3a-bc7d-5936710c2006,DISK] 2024-12-04T15:52:19,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45289,DS-52a8bfb8-612f-49d2-ab50-041a00986f87,DISK] 2024-12-04T15:52:19,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33405,DS-effbfd14-6aeb-4d03-9115-05142749f245,DISK] 2024-12-04T15:52:19,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-04T15:52:19,677 INFO [master/a21b6491b371:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/WALs/a21b6491b371,46261,1733327538339/a21b6491b371%2C46261%2C1733327538339.1733327539585 2024-12-04T15:52:19,678 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40533:40533),(127.0.0.1/127.0.0.1:37259:37259),(127.0.0.1/127.0.0.1:38711:38711)] 2024-12-04T15:52:19,678 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:52:19,678 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:52:19,682 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:19,683 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:19,719 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:19,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T15:52:19,754 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:19,757 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:19,758 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:19,761 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T15:52:19,761 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:19,762 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:52:19,763 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:19,766 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T15:52:19,766 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:19,767 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:52:19,767 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:19,770 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T15:52:19,770 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:19,771 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:52:19,772 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:19,775 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:19,776 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:19,781 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:19,781 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:19,784 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T15:52:19,788 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:19,794 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:52:19,795 INFO [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64192285, jitterRate=-0.043460413813591}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T15:52:19,803 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733327539694Initializing all the Stores at 1733327539697 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327539697Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733327539698 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733327539698Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733327539698Cleaning up temporary data from old regions at 1733327539781 (+83 ms)Region opened successfully at 1733327539803 (+22 ms) 2024-12-04T15:52:19,804 INFO [master/a21b6491b371:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T15:52:19,835 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49ca50fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a21b6491b371/172.17.0.2:0 2024-12-04T15:52:19,868 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T15:52:19,879 INFO [master/a21b6491b371:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T15:52:19,879 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T15:52:19,882 INFO [master/a21b6491b371:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T15:52:19,884 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-04T15:52:19,889 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-04T15:52:19,889 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T15:52:19,915 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T15:52:19,923 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T15:52:19,925 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T15:52:19,929 INFO [master/a21b6491b371:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T15:52:19,930 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T15:52:19,931 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T15:52:19,934 INFO [master/a21b6491b371:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T15:52:19,938 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T15:52:19,939 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T15:52:19,941 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T15:52:19,944 DEBUG [master/a21b6491b371:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T15:52:19,962 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T15:52:19,963 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T15:52:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:19,967 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,967 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,970 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a21b6491b371,46261,1733327538339, sessionid=0x1017e2d30160000, setting cluster-up flag (Was=false) 2024-12-04T15:52:19,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
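The ZKWatcher notifications above (NodeCreated for /hbase/running, NodeChildrenChanged for /hbase) follow the standard ZooKeeper pattern of registering a watch via exists() on a znode that may not have been created yet, then reacting to the resulting one-shot events. A minimal stand-alone sketch of that pattern with the plain ZooKeeper client is below; the ensemble address matches the one in the log, everything else is illustrative.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch of "Set watcher on znode that does not yet exist" followed by a
// NodeCreated notification, as seen in the ZKWatcher lines above.
public class RunningZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);

    Watcher watcher = (WatchedEvent event) -> {
      // Standard watches are one-shot; this fires once per event.
      if (event.getType() == Watcher.Event.EventType.NodeCreated
          && "/hbase/running".equals(event.getPath())) {
        created.countDown();
      }
    };

    ZooKeeper zk = new ZooKeeper("127.0.0.1:64574", 30_000, watcher);

    // exists() checks the node and, with watch=true, leaves the default
    // watcher registered even when the znode is not there yet.
    if (zk.exists("/hbase/running", true) == null) {
      System.out.println("znode not present yet; waiting for NodeCreated");
      created.await();
    }
    System.out.println("/hbase/running is present");
    zk.close();
  }
}
```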
2024-12-04T15:52:19,983 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,988 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T15:52:19,990 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a21b6491b371,46261,1733327538339 2024-12-04T15:52:19,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,999 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:19,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:20,005 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T15:52:20,006 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a21b6491b371,46261,1733327538339 2024-12-04T15:52:20,012 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T15:52:20,085 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T15:52:20,095 INFO [master/a21b6491b371:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T15:52:20,101 INFO [master/a21b6491b371:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
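The StochasticLoadBalancer line above reports the tuning it loaded (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000), and BaseLoadBalancer reports slop=0.2. As a hedged illustration, these values usually map onto the configuration keys below; the mapping is my reading of the logged values, the log itself does not print the property names.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative mapping of the values logged by StochasticLoadBalancer(272)
// and BaseLoadBalancer(416) onto their usual configuration keys.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);

    // "slop" is how far a server's region count may deviate from the
    // cluster average before the balancer considers moving regions.
    conf.setFloat("hbase.regions.slop", 0.2f);

    System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
  }
}
```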
2024-12-04T15:52:20,107 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a21b6491b371,46261,1733327538339 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T15:52:20,111 INFO [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(746): ClusterId : db5896f3-0241-4aba-9203-13a7561ae661 2024-12-04T15:52:20,112 INFO [RS:0;a21b6491b371:43197 {}] regionserver.HRegionServer(746): ClusterId : db5896f3-0241-4aba-9203-13a7561ae661 2024-12-04T15:52:20,112 INFO [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(746): ClusterId : db5896f3-0241-4aba-9203-13a7561ae661 2024-12-04T15:52:20,114 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a21b6491b371:0, corePoolSize=5, maxPoolSize=5 2024-12-04T15:52:20,114 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a21b6491b371:0, corePoolSize=5, maxPoolSize=5 2024-12-04T15:52:20,115 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a21b6491b371:0, corePoolSize=5, maxPoolSize=5 2024-12-04T15:52:20,115 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a21b6491b371:0, corePoolSize=5, maxPoolSize=5 2024-12-04T15:52:20,115 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a21b6491b371:0, corePoolSize=10, maxPoolSize=10 2024-12-04T15:52:20,115 DEBUG [RS:0;a21b6491b371:43197 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T15:52:20,115 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T15:52:20,115 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T15:52:20,115 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,115 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a21b6491b371:0, corePoolSize=2, maxPoolSize=2 2024-12-04T15:52:20,116 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,119 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733327570119 2024-12-04T15:52:20,121 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T15:52:20,122 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
flush-table-proc initialized 2024-12-04T15:52:20,122 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T15:52:20,122 DEBUG [RS:0;a21b6491b371:43197 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T15:52:20,122 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T15:52:20,122 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T15:52:20,122 DEBUG [RS:0;a21b6491b371:43197 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T15:52:20,122 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T15:52:20,124 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T15:52:20,125 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T15:52:20,126 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T15:52:20,127 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T15:52:20,127 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T15:52:20,127 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T15:52:20,130 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T15:52:20,130 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T15:52:20,130 DEBUG [RS:1;a21b6491b371:39883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e06349, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a21b6491b371/172.17.0.2:0 2024-12-04T15:52:20,130 DEBUG [RS:2;a21b6491b371:35881 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@221af23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a21b6491b371/172.17.0.2:0 2024-12-04T15:52:20,128 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-04T15:52:20,131 DEBUG [RS:0;a21b6491b371:43197 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T15:52:20,131 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:20,131 DEBUG [RS:0;a21b6491b371:43197 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37fd47c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a21b6491b371/172.17.0.2:0 2024-12-04T15:52:20,132 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T15:52:20,131 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T15:52:20,133 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T15:52:20,134 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T15:52:20,146 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T15:52:20,146 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T15:52:20,148 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a21b6491b371:0:becomeActiveMaster-HFileCleaner.large.0-1733327540147,5,FailOnTimeoutGroup] 2024-12-04T15:52:20,150 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): 
Starting for small files=Thread[master/a21b6491b371:0:becomeActiveMaster-HFileCleaner.small.0-1733327540149,5,FailOnTimeoutGroup] 2024-12-04T15:52:20,150 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,150 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T15:52:20,152 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:20,152 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,152 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:20,152 DEBUG [RS:0;a21b6491b371:43197 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a21b6491b371:43197 2024-12-04T15:52:20,152 DEBUG [RS:1;a21b6491b371:39883 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;a21b6491b371:39883 2024-12-04T15:52:20,152 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,152 DEBUG [RS:2;a21b6491b371:35881 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;a21b6491b371:35881 2024-12-04T15:52:20,155 INFO [RS:0;a21b6491b371:43197 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T15:52:20,155 INFO [RS:1;a21b6491b371:39883 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T15:52:20,155 INFO [RS:2;a21b6491b371:35881 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T15:52:20,155 INFO [RS:0;a21b6491b371:43197 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T15:52:20,155 INFO [RS:2;a21b6491b371:35881 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T15:52:20,155 INFO [RS:1;a21b6491b371:39883 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T15:52:20,155 DEBUG [RS:0;a21b6491b371:43197 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-04T15:52:20,155 DEBUG [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-04T15:52:20,155 DEBUG [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-04T15:52:20,158 INFO [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(2659): reportForDuty to master=a21b6491b371,46261,1733327538339 with port=35881, startcode=1733327539185 2024-12-04T15:52:20,159 INFO [RS:0;a21b6491b371:43197 {}] regionserver.HRegionServer(2659): reportForDuty to master=a21b6491b371,46261,1733327538339 with port=43197, startcode=1733327539032 2024-12-04T15:52:20,159 INFO [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(2659): reportForDuty to master=a21b6491b371,46261,1733327538339 with port=39883, startcode=1733327539139 2024-12-04T15:52:20,166 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-780551652_22 at /127.0.0.1:52154 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:45289:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52154 dst: /127.0.0.1:45289
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-04T15:52:20,174 DEBUG [RS:0;a21b6491b371:43197 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T15:52:20,175 DEBUG [RS:1;a21b6491b371:39883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T15:52:20,175 DEBUG [RS:2;a21b6491b371:35881 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T15:52:20,179 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-04T15:52:20,180 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T15:52:20,181 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1 2024-12-04T15:52:20,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-04T15:52:20,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41925 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-04T15:52:20,221 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:20,222 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-04T15:52:20,228 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58257, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T15:52:20,228 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40289, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T15:52:20,228 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59103, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T15:52:20,234 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46261 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a21b6491b371,39883,1733327539139 2024-12-04T15:52:20,236 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-780551652_22 at /127.0.0.1:60594 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:33405:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60594 dst: /127.0.0.1:33405
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-04T15:52:20,237 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46261 {}] master.ServerManager(517): Registering regionserver=a21b6491b371,39883,1733327539139 2024-12-04T15:52:20,249 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46261 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a21b6491b371,35881,1733327539185 2024-12-04T15:52:20,250 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46261 {}] master.ServerManager(517): Registering regionserver=a21b6491b371,35881,1733327539185 2024-12-04T15:52:20,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-04T15:52:20,253 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-04T15:52:20,254 DEBUG [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1 2024-12-04T15:52:20,254 DEBUG [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34217 2024-12-04T15:52:20,254 DEBUG [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T15:52:20,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:52:20,256 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46261 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a21b6491b371,43197,1733327539032 2024-12-04T15:52:20,256 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46261 {}] master.ServerManager(517): Registering regionserver=a21b6491b371,43197,1733327539032 2024-12-04T15:52:20,257 DEBUG [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1 2024-12-04T15:52:20,257 DEBUG [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34217 2024-12-04T15:52:20,257 DEBUG [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T15:52:20,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T15:52:20,260 DEBUG [RS:0;a21b6491b371:43197 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1 2024-12-04T15:52:20,261 DEBUG [RS:0;a21b6491b371:43197 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34217 2024-12-04T15:52:20,261 DEBUG [RS:0;a21b6491b371:43197 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T15:52:20,262 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T15:52:20,262 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:20,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, 
quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T15:52:20,263 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:20,264 DEBUG [RS:2;a21b6491b371:35881 {}] zookeeper.ZKUtil(111): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a21b6491b371,35881,1733327539185 2024-12-04T15:52:20,264 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T15:52:20,264 DEBUG [RS:1;a21b6491b371:39883 {}] zookeeper.ZKUtil(111): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a21b6491b371,39883,1733327539139 2024-12-04T15:52:20,264 WARN [RS:2;a21b6491b371:35881 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T15:52:20,264 WARN [RS:1;a21b6491b371:39883 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T15:52:20,264 INFO [RS:1;a21b6491b371:39883 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-04T15:52:20,264 INFO [RS:2;a21b6491b371:35881 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-04T15:52:20,264 DEBUG [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,35881,1733327539185 2024-12-04T15:52:20,264 DEBUG [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,39883,1733327539139 2024-12-04T15:52:20,266 DEBUG [RS:0;a21b6491b371:43197 {}] zookeeper.ZKUtil(111): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a21b6491b371,43197,1733327539032 2024-12-04T15:52:20,266 WARN [RS:0;a21b6491b371:43197 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-04T15:52:20,266 INFO [RS:0;a21b6491b371:43197 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-04T15:52:20,266 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T15:52:20,266 DEBUG [RS:0;a21b6491b371:43197 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,43197,1733327539032 2024-12-04T15:52:20,267 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a21b6491b371,39883,1733327539139] 2024-12-04T15:52:20,267 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:20,267 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a21b6491b371,43197,1733327539032] 2024-12-04T15:52:20,267 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a21b6491b371,35881,1733327539185] 2024-12-04T15:52:20,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:20,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T15:52:20,271 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T15:52:20,271 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:20,272 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:20,272 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T15:52:20,275 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T15:52:20,275 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:20,276 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:20,277 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T15:52:20,278 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740 2024-12-04T15:52:20,279 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740 2024-12-04T15:52:20,282 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T15:52:20,282 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T15:52:20,283 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-04T15:52:20,285 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T15:52:20,293 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:52:20,294 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64738294, jitterRate=-0.03532424569129944}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T15:52:20,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733327540255Initializing all the Stores at 1733327540259 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327540259Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327540259Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733327540259Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327540259Cleaning up temporary data from old regions at 1733327540282 (+23 ms)Region opened successfully at 1733327540298 (+16 ms) 2024-12-04T15:52:20,298 INFO [RS:2;a21b6491b371:35881 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T15:52:20,298 INFO [RS:1;a21b6491b371:39883 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T15:52:20,298 INFO [RS:0;a21b6491b371:43197 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T15:52:20,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T15:52:20,298 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T15:52:20,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T15:52:20,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
2024-12-04T15:52:20,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T15:52:20,300 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T15:52:20,300 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733327540298Disabling compacts and flushes for region at 1733327540298Disabling writes for close at 1733327540298Writing region close event to WAL at 1733327540299 (+1 ms)Closed at 1733327540299 2024-12-04T15:52:20,303 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T15:52:20,303 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T15:52:20,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T15:52:20,313 INFO [RS:2;a21b6491b371:35881 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T15:52:20,313 INFO [RS:1;a21b6491b371:39883 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T15:52:20,315 INFO [RS:0;a21b6491b371:43197 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T15:52:20,320 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T15:52:20,321 INFO [RS:0;a21b6491b371:43197 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T15:52:20,321 INFO [RS:1;a21b6491b371:39883 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T15:52:20,321 INFO [RS:2;a21b6491b371:35881 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T15:52:20,321 INFO [RS:1;a21b6491b371:39883 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,321 INFO [RS:2;a21b6491b371:35881 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,321 INFO [RS:0;a21b6491b371:43197 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-04T15:52:20,324 INFO [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T15:52:20,324 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T15:52:20,328 INFO [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T15:52:20,328 INFO [RS:0;a21b6491b371:43197 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T15:52:20,330 INFO [RS:0;a21b6491b371:43197 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T15:52:20,330 INFO [RS:1;a21b6491b371:39883 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T15:52:20,330 INFO [RS:2;a21b6491b371:35881 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T15:52:20,332 INFO [RS:1;a21b6491b371:39883 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,332 INFO [RS:2;a21b6491b371:35881 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,332 INFO [RS:0;a21b6491b371:43197 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,332 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,332 DEBUG [RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,332 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,332 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,332 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,332 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,332 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,332 DEBUG [RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,332 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,332 DEBUG [RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor 
service name=RS_OPEN_PRIORITY_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,332 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a21b6491b371:0, corePoolSize=2, maxPoolSize=2 2024-12-04T15:52:20,333 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a21b6491b371:0, corePoolSize=2, maxPoolSize=2 2024-12-04T15:52:20,333 DEBUG [RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a21b6491b371:0, corePoolSize=2, maxPoolSize=2 2024-12-04T15:52:20,333 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG 
[RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,333 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a21b6491b371:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:52:20,334 DEBUG [RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a21b6491b371:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:52:20,334 DEBUG [RS:1;a21b6491b371:39883 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:52:20,334 DEBUG [RS:0;a21b6491b371:43197 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:52:20,334 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,334 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:20,334 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a21b6491b371:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:52:20,334 DEBUG [RS:2;a21b6491b371:35881 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:52:20,335 INFO [RS:1;a21b6491b371:39883 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,335 INFO [RS:1;a21b6491b371:39883 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,335 INFO [RS:1;a21b6491b371:39883 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-04T15:52:20,335 INFO [RS:1;a21b6491b371:39883 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,335 INFO [RS:1;a21b6491b371:39883 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,335 INFO [RS:1;a21b6491b371:39883 {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,39883,1733327539139-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T15:52:20,335 INFO [RS:0;a21b6491b371:43197 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,335 INFO [RS:0;a21b6491b371:43197 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,336 INFO [RS:0;a21b6491b371:43197 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,336 INFO [RS:0;a21b6491b371:43197 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,336 INFO [RS:0;a21b6491b371:43197 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,336 INFO [RS:0;a21b6491b371:43197 {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,43197,1733327539032-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T15:52:20,337 INFO [RS:2;a21b6491b371:35881 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,337 INFO [RS:2;a21b6491b371:35881 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,337 INFO [RS:2;a21b6491b371:35881 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,337 INFO [RS:2;a21b6491b371:35881 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,337 INFO [RS:2;a21b6491b371:35881 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,337 INFO [RS:2;a21b6491b371:35881 {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,35881,1733327539185-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T15:52:20,356 INFO [RS:1;a21b6491b371:39883 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T15:52:20,357 INFO [RS:0;a21b6491b371:43197 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T15:52:20,358 INFO [RS:1;a21b6491b371:39883 {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,39883,1733327539139-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,358 INFO [RS:0;a21b6491b371:43197 {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,43197,1733327539032-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,359 INFO [RS:1;a21b6491b371:39883 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T15:52:20,359 INFO [RS:0;a21b6491b371:43197 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,359 INFO [RS:1;a21b6491b371:39883 {}] regionserver.Replication(171): a21b6491b371,39883,1733327539139 started 2024-12-04T15:52:20,359 INFO [RS:0;a21b6491b371:43197 {}] regionserver.Replication(171): a21b6491b371,43197,1733327539032 started 2024-12-04T15:52:20,362 INFO [RS:2;a21b6491b371:35881 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T15:52:20,362 INFO [RS:2;a21b6491b371:35881 {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,35881,1733327539185-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,362 INFO [RS:2;a21b6491b371:35881 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,362 INFO [RS:2;a21b6491b371:35881 {}] regionserver.Replication(171): a21b6491b371,35881,1733327539185 started 2024-12-04T15:52:20,378 INFO [RS:1;a21b6491b371:39883 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,379 INFO [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(1482): Serving as a21b6491b371,39883,1733327539139, RpcServer on a21b6491b371/172.17.0.2:39883, sessionid=0x1017e2d30160002 2024-12-04T15:52:20,379 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T15:52:20,379 DEBUG [RS:1;a21b6491b371:39883 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a21b6491b371,39883,1733327539139 2024-12-04T15:52:20,380 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a21b6491b371,39883,1733327539139' 2024-12-04T15:52:20,380 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T15:52:20,381 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T15:52:20,381 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T15:52:20,382 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T15:52:20,382 DEBUG [RS:1;a21b6491b371:39883 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a21b6491b371,39883,1733327539139 2024-12-04T15:52:20,382 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a21b6491b371,39883,1733327539139' 2024-12-04T15:52:20,382 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T15:52:20,382 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T15:52:20,383 DEBUG [RS:1;a21b6491b371:39883 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T15:52:20,383 INFO [RS:1;a21b6491b371:39883 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T15:52:20,383 INFO 
[RS:1;a21b6491b371:39883 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T15:52:20,383 INFO [RS:0;a21b6491b371:43197 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,383 INFO [RS:0;a21b6491b371:43197 {}] regionserver.HRegionServer(1482): Serving as a21b6491b371,43197,1733327539032, RpcServer on a21b6491b371/172.17.0.2:43197, sessionid=0x1017e2d30160001 2024-12-04T15:52:20,384 DEBUG [RS:0;a21b6491b371:43197 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T15:52:20,384 DEBUG [RS:0;a21b6491b371:43197 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a21b6491b371,43197,1733327539032 2024-12-04T15:52:20,384 DEBUG [RS:0;a21b6491b371:43197 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a21b6491b371,43197,1733327539032' 2024-12-04T15:52:20,384 DEBUG [RS:0;a21b6491b371:43197 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T15:52:20,384 DEBUG [RS:0;a21b6491b371:43197 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T15:52:20,385 INFO [RS:2;a21b6491b371:35881 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:20,385 INFO [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(1482): Serving as a21b6491b371,35881,1733327539185, RpcServer on a21b6491b371/172.17.0.2:35881, sessionid=0x1017e2d30160003 2024-12-04T15:52:20,385 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T15:52:20,385 DEBUG [RS:0;a21b6491b371:43197 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T15:52:20,385 DEBUG [RS:2;a21b6491b371:35881 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a21b6491b371,35881,1733327539185 2024-12-04T15:52:20,385 DEBUG [RS:0;a21b6491b371:43197 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T15:52:20,385 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a21b6491b371,35881,1733327539185' 2024-12-04T15:52:20,385 DEBUG [RS:0;a21b6491b371:43197 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a21b6491b371,43197,1733327539032 2024-12-04T15:52:20,385 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T15:52:20,385 DEBUG [RS:0;a21b6491b371:43197 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a21b6491b371,43197,1733327539032' 2024-12-04T15:52:20,385 DEBUG [RS:0;a21b6491b371:43197 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T15:52:20,386 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T15:52:20,386 DEBUG [RS:0;a21b6491b371:43197 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T15:52:20,387 DEBUG 
[RS:0;a21b6491b371:43197 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T15:52:20,387 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T15:52:20,387 INFO [RS:0;a21b6491b371:43197 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T15:52:20,387 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T15:52:20,387 INFO [RS:0;a21b6491b371:43197 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T15:52:20,387 DEBUG [RS:2;a21b6491b371:35881 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a21b6491b371,35881,1733327539185 2024-12-04T15:52:20,387 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a21b6491b371,35881,1733327539185' 2024-12-04T15:52:20,387 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T15:52:20,387 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T15:52:20,388 DEBUG [RS:2;a21b6491b371:35881 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T15:52:20,388 INFO [RS:2;a21b6491b371:35881 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T15:52:20,388 INFO [RS:2;a21b6491b371:35881 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T15:52:20,475 WARN [a21b6491b371:46261 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-04T15:52:20,489 INFO [RS:2;a21b6491b371:35881 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-04T15:52:20,489 INFO [RS:0;a21b6491b371:43197 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-04T15:52:20,489 INFO [RS:1;a21b6491b371:39883 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-04T15:52:20,492 INFO [RS:2;a21b6491b371:35881 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a21b6491b371%2C35881%2C1733327539185, suffix=, logDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,35881,1733327539185, archiveDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/oldWALs, maxLogs=32 2024-12-04T15:52:20,492 INFO [RS:1;a21b6491b371:39883 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a21b6491b371%2C39883%2C1733327539139, suffix=, logDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,39883,1733327539139, archiveDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/oldWALs, maxLogs=32 2024-12-04T15:52:20,492 INFO [RS:0;a21b6491b371:43197 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a21b6491b371%2C43197%2C1733327539032, suffix=, logDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,43197,1733327539032, archiveDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/oldWALs, maxLogs=32 2024-12-04T15:52:20,512 DEBUG [RS:0;a21b6491b371:43197 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,43197,1733327539032/a21b6491b371%2C43197%2C1733327539032.1733327540497, exclude list is [], retry=0 2024-12-04T15:52:20,512 DEBUG [RS:1;a21b6491b371:39883 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,39883,1733327539139/a21b6491b371%2C39883%2C1733327539139.1733327540497, exclude list is [], retry=0 2024-12-04T15:52:20,512 DEBUG [RS:2;a21b6491b371:35881 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,35881,1733327539185/a21b6491b371%2C35881%2C1733327539185.1733327540496, exclude list is [], retry=0 2024-12-04T15:52:20,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41925,DS-6e325b22-77ed-4b3a-bc7d-5936710c2006,DISK] 2024-12-04T15:52:20,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41925,DS-6e325b22-77ed-4b3a-bc7d-5936710c2006,DISK] 2024-12-04T15:52:20,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:45289,DS-52a8bfb8-612f-49d2-ab50-041a00986f87,DISK] 2024-12-04T15:52:20,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45289,DS-52a8bfb8-612f-49d2-ab50-041a00986f87,DISK] 2024-12-04T15:52:20,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33405,DS-effbfd14-6aeb-4d03-9115-05142749f245,DISK] 2024-12-04T15:52:20,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33405,DS-effbfd14-6aeb-4d03-9115-05142749f245,DISK] 2024-12-04T15:52:20,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41925,DS-6e325b22-77ed-4b3a-bc7d-5936710c2006,DISK] 2024-12-04T15:52:20,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33405,DS-effbfd14-6aeb-4d03-9115-05142749f245,DISK] 2024-12-04T15:52:20,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45289,DS-52a8bfb8-612f-49d2-ab50-041a00986f87,DISK] 2024-12-04T15:52:20,557 INFO [RS:0;a21b6491b371:43197 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,43197,1733327539032/a21b6491b371%2C43197%2C1733327539032.1733327540497 2024-12-04T15:52:20,557 INFO [RS:1;a21b6491b371:39883 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,39883,1733327539139/a21b6491b371%2C39883%2C1733327539139.1733327540497 2024-12-04T15:52:20,558 INFO [RS:2;a21b6491b371:35881 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,35881,1733327539185/a21b6491b371%2C35881%2C1733327539185.1733327540496 2024-12-04T15:52:20,558 DEBUG [RS:0;a21b6491b371:43197 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38711:38711),(127.0.0.1/127.0.0.1:37259:37259),(127.0.0.1/127.0.0.1:40533:40533)] 2024-12-04T15:52:20,558 DEBUG [RS:1;a21b6491b371:39883 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37259:37259),(127.0.0.1/127.0.0.1:38711:38711),(127.0.0.1/127.0.0.1:40533:40533)] 2024-12-04T15:52:20,559 DEBUG [RS:2;a21b6491b371:35881 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40533:40533),(127.0.0.1/127.0.0.1:37259:37259),(127.0.0.1/127.0.0.1:38711:38711)] 2024-12-04T15:52:20,728 DEBUG [a21b6491b371:46261 {}] assignment.AssignmentManager(2472): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-12-04T15:52:20,736 DEBUG [a21b6491b371:46261 {}] balancer.BalancerClusterState(204): Hosts are {a21b6491b371=0} racks are {/default-rack=0} 2024-12-04T15:52:20,742 DEBUG [a21b6491b371:46261 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-04T15:52:20,742 DEBUG [a21b6491b371:46261 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-04T15:52:20,742 DEBUG [a21b6491b371:46261 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-04T15:52:20,742 DEBUG [a21b6491b371:46261 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-04T15:52:20,742 DEBUG [a21b6491b371:46261 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-04T15:52:20,743 DEBUG [a21b6491b371:46261 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-04T15:52:20,743 INFO [a21b6491b371:46261 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-04T15:52:20,743 INFO [a21b6491b371:46261 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-04T15:52:20,743 INFO [a21b6491b371:46261 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-04T15:52:20,743 DEBUG [a21b6491b371:46261 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-04T15:52:20,752 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a21b6491b371,35881,1733327539185 2024-12-04T15:52:20,759 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a21b6491b371,35881,1733327539185, state=OPENING 2024-12-04T15:52:20,765 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T15:52:20,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:20,767 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:20,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:20,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:20,768 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:20,768 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:20,768 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:20,768 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:20,769 DEBUG [PEWorker-2 
{}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T15:52:20,771 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a21b6491b371,35881,1733327539185}] 2024-12-04T15:52:20,946 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T15:52:20,948 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43359, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T15:52:20,961 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T15:52:20,962 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-04T15:52:20,962 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-04T15:52:20,966 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a21b6491b371%2C35881%2C1733327539185.meta, suffix=.meta, logDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,35881,1733327539185, archiveDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/oldWALs, maxLogs=32 2024-12-04T15:52:20,983 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,35881,1733327539185/a21b6491b371%2C35881%2C1733327539185.meta.1733327540968.meta, exclude list is [], retry=0 2024-12-04T15:52:20,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41925,DS-6e325b22-77ed-4b3a-bc7d-5936710c2006,DISK] 2024-12-04T15:52:20,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33405,DS-effbfd14-6aeb-4d03-9115-05142749f245,DISK] 2024-12-04T15:52:20,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45289,DS-52a8bfb8-612f-49d2-ab50-041a00986f87,DISK] 2024-12-04T15:52:20,991 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/WALs/a21b6491b371,35881,1733327539185/a21b6491b371%2C35881%2C1733327539185.meta.1733327540968.meta 2024-12-04T15:52:20,991 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37259:37259),(127.0.0.1/127.0.0.1:40533:40533),(127.0.0.1/127.0.0.1:38711:38711)] 2024-12-04T15:52:20,992 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:52:20,994 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T15:52:20,997 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T15:52:21,002 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-04T15:52:21,007 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T15:52:21,007 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:52:21,007 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T15:52:21,007 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T15:52:21,011 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T15:52:21,012 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T15:52:21,013 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:21,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:21,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T15:52:21,015 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T15:52:21,016 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:21,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:21,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T15:52:21,018 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T15:52:21,018 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:21,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:21,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T15:52:21,021 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T15:52:21,021 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:21,022 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:21,022 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T15:52:21,023 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740 2024-12-04T15:52:21,025 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740 2024-12-04T15:52:21,028 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T15:52:21,028 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T15:52:21,029 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-04T15:52:21,031 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T15:52:21,033 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63004933, jitterRate=-0.061153337359428406}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T15:52:21,033 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T15:52:21,035 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733327541008Writing region info on filesystem at 1733327541008Initializing all the Stores at 1733327541010 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327541010Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327541010Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733327541010Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327541010Cleaning up temporary data from old regions at 1733327541028 (+18 ms)Running coprocessor post-open hooks at 1733327541033 (+5 ms)Region opened successfully at 1733327541035 (+2 ms) 2024-12-04T15:52:21,042 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733327540937 2024-12-04T15:52:21,054 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T15:52:21,054 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T15:52:21,056 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=a21b6491b371,35881,1733327539185 2024-12-04T15:52:21,058 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a21b6491b371,35881,1733327539185, state=OPEN 2024-12-04T15:52:21,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T15:52:21,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T15:52:21,060 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T15:52:21,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T15:52:21,060 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:21,060 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:21,060 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:21,060 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:21,061 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a21b6491b371,35881,1733327539185 2024-12-04T15:52:21,065 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T15:52:21,066 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a21b6491b371,35881,1733327539185 in 290 msec 2024-12-04T15:52:21,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T15:52:21,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 758 msec 2024-12-04T15:52:21,073 DEBUG [PEWorker-5 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T15:52:21,073 INFO [PEWorker-5 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T15:52:21,094 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T15:52:21,095 DEBUG [PEWorker-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=a21b6491b371,35881,1733327539185, seqNum=-1] 2024-12-04T15:52:21,118 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:52:21,121 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52551, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:52:21,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0970 sec 2024-12-04T15:52:21,160 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733327541160, completionTime=-1 2024-12-04T15:52:21,164 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-04T15:52:21,164 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T15:52:21,197 INFO [master/a21b6491b371:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-04T15:52:21,198 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733327601198 2024-12-04T15:52:21,198 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733327661198 2024-12-04T15:52:21,198 INFO [master/a21b6491b371:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 33 msec 2024-12-04T15:52:21,200 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-04T15:52:21,207 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,46261,1733327538339-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:21,207 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,46261,1733327538339-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:21,208 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,46261,1733327538339-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:21,209 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a21b6491b371:46261, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:21,210 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:21,211 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-04T15:52:21,217 DEBUG [master/a21b6491b371:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T15:52:21,238 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.977sec 2024-12-04T15:52:21,240 INFO [master/a21b6491b371:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T15:52:21,241 INFO [master/a21b6491b371:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T15:52:21,242 INFO [master/a21b6491b371:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T15:52:21,242 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T15:52:21,242 INFO [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T15:52:21,243 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,46261,1733327538339-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T15:52:21,243 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,46261,1733327538339-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T15:52:21,248 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T15:52:21,249 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T15:52:21,249 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,46261,1733327538339-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T15:52:21,323 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ccc5dda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:52:21,327 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-04T15:52:21,327 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-04T15:52:21,331 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a21b6491b371,46261,-1 for getting cluster id 2024-12-04T15:52:21,334 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T15:52:21,342 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'db5896f3-0241-4aba-9203-13a7561ae661' 2024-12-04T15:52:21,344 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T15:52:21,344 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "db5896f3-0241-4aba-9203-13a7561ae661" 2024-12-04T15:52:21,344 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54ba1ab8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:52:21,344 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a21b6491b371,46261,-1] 2024-12-04T15:52:21,347 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T15:52:21,349 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:21,349 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57202, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T15:52:21,352 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9627ec0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:52:21,353 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T15:52:21,360 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a21b6491b371,35881,1733327539185, seqNum=-1] 2024-12-04T15:52:21,360 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:52:21,363 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34128, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:52:21,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=a21b6491b371,46261,1733327538339 2024-12-04T15:52:21,387 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-04T15:52:21,391 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is a21b6491b371,46261,1733327538339 2024-12-04T15:52:21,394 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@101e99aa 2024-12-04T15:52:21,395 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T15:52:21,397 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57208, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T15:52:21,402 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:52:21,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-04T15:52:21,412 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T15:52:21,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-04T15:52:21,415 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:21,417 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T15:52:21,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T15:52:21,426 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:21,426 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-04T15:52:21,430 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-780551652_22 at /127.0.0.1:41342 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:33405:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41342 dst: /127.0.0.1:33405 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:21,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-04T15:52:21,435 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-04T15:52:21,437 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6a40cd612297d3c163dfe11bead42255, NAME => 'TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1 2024-12-04T15:52:21,443 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:21,443 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-04T15:52:21,450 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-780551652_22 at /127.0.0.1:41364 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:33405:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41364 dst: /127.0.0.1:33405 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:21,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-04T15:52:21,462 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-04T15:52:21,462 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:52:21,463 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 6a40cd612297d3c163dfe11bead42255, disabling compactions & flushes 2024-12-04T15:52:21,463 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 2024-12-04T15:52:21,463 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 2024-12-04T15:52:21,463 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. after waiting 0 ms 2024-12-04T15:52:21,463 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 2024-12-04T15:52:21,463 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 
2024-12-04T15:52:21,463 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6a40cd612297d3c163dfe11bead42255: Waiting for close lock at 1733327541463Disabling compacts and flushes for region at 1733327541463Disabling writes for close at 1733327541463Writing region close event to WAL at 1733327541463Closed at 1733327541463 2024-12-04T15:52:21,465 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T15:52:21,470 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733327541466"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733327541466"}]},"ts":"1733327541466"} 2024-12-04T15:52:21,476 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-04T15:52:21,478 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T15:52:21,480 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733327541478"}]},"ts":"1733327541478"} 2024-12-04T15:52:21,485 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-04T15:52:21,486 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {a21b6491b371=0} racks are {/default-rack=0} 2024-12-04T15:52:21,487 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-04T15:52:21,487 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-04T15:52:21,487 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-04T15:52:21,487 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-04T15:52:21,487 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-04T15:52:21,487 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-04T15:52:21,487 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-04T15:52:21,487 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-04T15:52:21,487 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-04T15:52:21,487 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-04T15:52:21,488 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6a40cd612297d3c163dfe11bead42255, ASSIGN}] 2024-12-04T15:52:21,491 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6a40cd612297d3c163dfe11bead42255, ASSIGN 2024-12-04T15:52:21,492 INFO [PEWorker-3 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6a40cd612297d3c163dfe11bead42255, ASSIGN; state=OFFLINE, location=a21b6491b371,39883,1733327539139; forceNewPlan=false, retain=false 2024-12-04T15:52:21,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T15:52:21,645 INFO [a21b6491b371:46261 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-04T15:52:21,646 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6a40cd612297d3c163dfe11bead42255, regionState=OPENING, regionLocation=a21b6491b371,39883,1733327539139 2024-12-04T15:52:21,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6a40cd612297d3c163dfe11bead42255, ASSIGN because future has completed 2024-12-04T15:52:21,651 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6a40cd612297d3c163dfe11bead42255, server=a21b6491b371,39883,1733327539139}] 2024-12-04T15:52:21,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T15:52:21,806 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T15:52:21,808 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49991, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T15:52:21,814 INFO [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 
2024-12-04T15:52:21,814 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6a40cd612297d3c163dfe11bead42255, NAME => 'TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:52:21,815 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 6a40cd612297d3c163dfe11bead42255 2024-12-04T15:52:21,815 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:52:21,815 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6a40cd612297d3c163dfe11bead42255 2024-12-04T15:52:21,815 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6a40cd612297d3c163dfe11bead42255 2024-12-04T15:52:21,818 INFO [StoreOpener-6a40cd612297d3c163dfe11bead42255-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6a40cd612297d3c163dfe11bead42255 2024-12-04T15:52:21,820 INFO [StoreOpener-6a40cd612297d3c163dfe11bead42255-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a40cd612297d3c163dfe11bead42255 columnFamilyName cf 2024-12-04T15:52:21,820 DEBUG [StoreOpener-6a40cd612297d3c163dfe11bead42255-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:21,821 INFO [StoreOpener-6a40cd612297d3c163dfe11bead42255-1 {}] regionserver.HStore(327): Store=6a40cd612297d3c163dfe11bead42255/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:52:21,821 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6a40cd612297d3c163dfe11bead42255 2024-12-04T15:52:21,823 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/default/TestHBaseWalOnEC/6a40cd612297d3c163dfe11bead42255 2024-12-04T15:52:21,823 DEBUG 
[RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/default/TestHBaseWalOnEC/6a40cd612297d3c163dfe11bead42255 2024-12-04T15:52:21,824 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6a40cd612297d3c163dfe11bead42255 2024-12-04T15:52:21,824 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6a40cd612297d3c163dfe11bead42255 2024-12-04T15:52:21,827 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6a40cd612297d3c163dfe11bead42255 2024-12-04T15:52:21,833 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/default/TestHBaseWalOnEC/6a40cd612297d3c163dfe11bead42255/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:52:21,834 INFO [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6a40cd612297d3c163dfe11bead42255; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68765699, jitterRate=0.02468876540660858}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T15:52:21,834 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6a40cd612297d3c163dfe11bead42255 2024-12-04T15:52:21,836 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6a40cd612297d3c163dfe11bead42255: Running coprocessor pre-open hook at 1733327541815Writing region info on filesystem at 1733327541815Initializing all the Stores at 1733327541817 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733327541817Cleaning up temporary data from old regions at 1733327541824 (+7 ms)Running coprocessor post-open hooks at 1733327541834 (+10 ms)Region opened successfully at 1733327541836 (+2 ms) 2024-12-04T15:52:21,838 INFO [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255., pid=6, masterSystemTime=1733327541805 2024-12-04T15:52:21,842 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 2024-12-04T15:52:21,842 INFO [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 
2024-12-04T15:52:21,843 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6a40cd612297d3c163dfe11bead42255, regionState=OPEN, openSeqNum=2, regionLocation=a21b6491b371,39883,1733327539139 2024-12-04T15:52:21,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6a40cd612297d3c163dfe11bead42255, server=a21b6491b371,39883,1733327539139 because future has completed 2024-12-04T15:52:21,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-04T15:52:21,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6a40cd612297d3c163dfe11bead42255, server=a21b6491b371,39883,1733327539139 in 198 msec 2024-12-04T15:52:21,857 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-04T15:52:21,857 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6a40cd612297d3c163dfe11bead42255, ASSIGN in 365 msec 2024-12-04T15:52:21,859 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T15:52:21,859 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733327541859"}]},"ts":"1733327541859"} 2024-12-04T15:52:21,862 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-04T15:52:21,863 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T15:52:21,866 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 458 msec 2024-12-04T15:52:22,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T15:52:22,047 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-04T15:52:22,048 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-04T15:52:22,049 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-04T15:52:22,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-04T15:52:22,054 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-04T15:52:22,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-12-04T15:52:22,062 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255., hostname=a21b6491b371,39883,1733327539139, seqNum=2] 2024-12-04T15:52:22,063 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:52:22,066 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34594, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:52:22,074 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-04T15:52:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-04T15:52:22,080 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:52:22,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-04T15:52:22,081 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:52:22,083 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:52:22,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-04T15:52:22,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-04T15:52:22,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 
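The 'Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC' request handled by HMaster above is the server side of an ordinary admin flush. A minimal client-side sketch follows (hypothetical class name and connection setup, assuming an hbase-site.xml for this cluster is on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table, which is what
      // the FlushTableProcedure (pid=7) above carries out.
      admin.flush(TableName.valueOf("TestHBaseWalOnEC"));
    }
  }
}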
2024-12-04T15:52:22,249 INFO [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 6a40cd612297d3c163dfe11bead42255 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-04T15:52:22,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/default/TestHBaseWalOnEC/6a40cd612297d3c163dfe11bead42255/.tmp/cf/fdc3aa7bf7f443b3a4cc512e7fa59934 is 36, key is row/cf:cq/1733327542066/Put/seqid=0 2024-12-04T15:52:22,309 WARN [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:22,309 WARN [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:22,313 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1785435193_22 at /127.0.0.1:36942 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:45289:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36942 dst: /127.0.0.1:45289 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:22,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-04T15:52:22,318 WARN [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
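Both 'Cannot allocate parity block' warnings above point to the same check the log itself recommends, 'hdfs ec -verifyClusterSetup'. The equivalent information can also be read programmatically; the sketch below (hypothetical class, not part of the test) prints the effective policy's data-plus-parity unit count next to the number of live datanodes:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcTopologyCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at the NameNode (hdfs://localhost:34217 in this run).
    DistributedFileSystem dfs = (DistributedFileSystem) new Path("/").getFileSystem(conf);
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(new Path("/"));
    int live = dfs.getDataNodeStats().length;
    if (policy == null) {
      System.out.println("no EC policy on /; plain replication is in effect");
    } else {
      int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
      System.out.printf("policy %s needs %d datanodes, cluster has %d live%n",
          policy.getName(), needed, live);
    }
    dfs.close();
  }
}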
2024-12-04T15:52:22,318 INFO [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/default/TestHBaseWalOnEC/6a40cd612297d3c163dfe11bead42255/.tmp/cf/fdc3aa7bf7f443b3a4cc512e7fa59934 2024-12-04T15:52:22,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/default/TestHBaseWalOnEC/6a40cd612297d3c163dfe11bead42255/.tmp/cf/fdc3aa7bf7f443b3a4cc512e7fa59934 as hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/default/TestHBaseWalOnEC/6a40cd612297d3c163dfe11bead42255/cf/fdc3aa7bf7f443b3a4cc512e7fa59934 2024-12-04T15:52:22,374 INFO [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/default/TestHBaseWalOnEC/6a40cd612297d3c163dfe11bead42255/cf/fdc3aa7bf7f443b3a4cc512e7fa59934, entries=1, sequenceid=5, filesize=4.7 K 2024-12-04T15:52:22,380 INFO [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 6a40cd612297d3c163dfe11bead42255 in 131ms, sequenceid=5, compaction requested=false 2024-12-04T15:52:22,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-04T15:52:22,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 6a40cd612297d3c163dfe11bead42255: 2024-12-04T15:52:22,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 
2024-12-04T15:52:22,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-04T15:52:22,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-04T15:52:22,392 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-04T15:52:22,392 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 305 msec 2024-12-04T15:52:22,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-04T15:52:22,397 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 319 msec 2024-12-04T15:52:22,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46261 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-04T15:52:22,708 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-04T15:52:22,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T15:52:22,723 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-04T15:52:22,724 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:52:22,730 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:22,730 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:22,730 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-04T15:52:22,731 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T15:52:22,731 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=994033306, stopped=false 2024-12-04T15:52:22,731 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a21b6491b371,46261,1733327538339 2024-12-04T15:52:22,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:22,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:22,733 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:22,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:22,733 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T15:52:22,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:22,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:22,733 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:22,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:22,734 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T15:52:22,734 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:52:22,734 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:22,734 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a21b6491b371,43197,1733327539032' ***** 2024-12-04T15:52:22,734 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:22,734 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T15:52:22,734 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a21b6491b371,39883,1733327539139' ***** 2024-12-04T15:52:22,734 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T15:52:22,735 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a21b6491b371,35881,1733327539185' ***** 2024-12-04T15:52:22,735 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T15:52:22,735 INFO [RS:0;a21b6491b371:43197 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T15:52:22,735 INFO [RS:2;a21b6491b371:35881 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T15:52:22,735 INFO [RS:0;a21b6491b371:43197 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T15:52:22,735 INFO [RS:0;a21b6491b371:43197 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T15:52:22,735 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T15:52:22,735 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T15:52:22,735 INFO [RS:0;a21b6491b371:43197 {}] regionserver.HRegionServer(959): stopping server a21b6491b371,43197,1733327539032 2024-12-04T15:52:22,735 INFO [RS:0;a21b6491b371:43197 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T15:52:22,736 INFO [RS:0;a21b6491b371:43197 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a21b6491b371:43197. 2024-12-04T15:52:22,736 INFO [RS:2;a21b6491b371:35881 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-04T15:52:22,736 DEBUG [RS:0;a21b6491b371:43197 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:52:22,736 INFO [RS:1;a21b6491b371:39883 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T15:52:22,736 INFO [RS:2;a21b6491b371:35881 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T15:52:22,736 DEBUG [RS:0;a21b6491b371:43197 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:22,736 INFO [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(959): stopping server a21b6491b371,35881,1733327539185 2024-12-04T15:52:22,736 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:22,736 INFO [RS:1;a21b6491b371:39883 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T15:52:22,736 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:22,736 INFO [RS:2;a21b6491b371:35881 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T15:52:22,736 INFO [RS:1;a21b6491b371:39883 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T15:52:22,736 INFO [RS:2;a21b6491b371:35881 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;a21b6491b371:35881. 2024-12-04T15:52:22,736 INFO [RS:0;a21b6491b371:43197 {}] regionserver.HRegionServer(976): stopping server a21b6491b371,43197,1733327539032; all regions closed. 
2024-12-04T15:52:22,736 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T15:52:22,736 DEBUG [RS:2;a21b6491b371:35881 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:52:22,736 INFO [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(3091): Received CLOSE for 6a40cd612297d3c163dfe11bead42255 2024-12-04T15:52:22,736 DEBUG [RS:2;a21b6491b371:35881 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:22,736 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:22,736 INFO [RS:2;a21b6491b371:35881 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T15:52:22,737 INFO [RS:2;a21b6491b371:35881 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T15:52:22,737 INFO [RS:2;a21b6491b371:35881 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-04T15:52:22,737 INFO [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T15:52:22,737 INFO [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(959): stopping server a21b6491b371,39883,1733327539139 2024-12-04T15:52:22,737 INFO [RS:1;a21b6491b371:39883 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T15:52:22,737 INFO [RS:1;a21b6491b371:39883 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;a21b6491b371:39883. 
2024-12-04T15:52:22,737 DEBUG [RS:1;a21b6491b371:39883 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:52:22,737 DEBUG [RS:1;a21b6491b371:39883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:22,738 INFO [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-04T15:52:22,738 DEBUG [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(1325): Online Regions={6a40cd612297d3c163dfe11bead42255=TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255.} 2024-12-04T15:52:22,738 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6a40cd612297d3c163dfe11bead42255, disabling compactions & flushes 2024-12-04T15:52:22,738 INFO [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 2024-12-04T15:52:22,738 INFO [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-04T15:52:22,738 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 2024-12-04T15:52:22,738 DEBUG [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-04T15:52:22,738 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. after waiting 0 ms 2024-12-04T15:52:22,738 DEBUG [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(1351): Waiting on 6a40cd612297d3c163dfe11bead42255 2024-12-04T15:52:22,738 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 
2024-12-04T15:52:22,738 DEBUG [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-04T15:52:22,738 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T15:52:22,739 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T15:52:22,739 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T15:52:22,739 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T15:52:22,739 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T15:52:22,739 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-04T15:52:22,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_1073741827_1017 (size=93) 2024-12-04T15:52:22,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41925 is added to blk_1073741827_1017 (size=93) 2024-12-04T15:52:22,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741827_1017 (size=93) 2024-12-04T15:52:22,751 DEBUG [RS:0;a21b6491b371:43197 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/oldWALs 2024-12-04T15:52:22,752 INFO [RS:0;a21b6491b371:43197 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a21b6491b371%2C43197%2C1733327539032:(num 1733327540497) 2024-12-04T15:52:22,752 DEBUG [RS:0;a21b6491b371:43197 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:22,752 INFO [RS:0;a21b6491b371:43197 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T15:52:22,752 INFO [RS:0;a21b6491b371:43197 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T15:52:22,752 INFO [RS:0;a21b6491b371:43197 {}] hbase.ChoreService(370): Chore service for: regionserver/a21b6491b371:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T15:52:22,753 INFO [RS:0;a21b6491b371:43197 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T15:52:22,753 INFO [RS:0;a21b6491b371:43197 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T15:52:22,753 INFO [regionserver/a21b6491b371:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T15:52:22,753 INFO [RS:0;a21b6491b371:43197 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-04T15:52:22,753 INFO [RS:0;a21b6491b371:43197 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T15:52:22,753 INFO [RS:0;a21b6491b371:43197 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43197 2024-12-04T15:52:22,756 INFO [regionserver/a21b6491b371:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T15:52:22,756 INFO [regionserver/a21b6491b371:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T15:52:22,756 INFO [regionserver/a21b6491b371:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T15:52:22,757 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a21b6491b371,43197,1733327539032 2024-12-04T15:52:22,757 INFO [RS:0;a21b6491b371:43197 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T15:52:22,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T15:52:22,760 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a21b6491b371,43197,1733327539032] 2024-12-04T15:52:22,762 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a21b6491b371,43197,1733327539032 already deleted, retry=false 2024-12-04T15:52:22,763 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a21b6491b371,43197,1733327539032 expired; onlineServers=2 2024-12-04T15:52:22,766 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/default/TestHBaseWalOnEC/6a40cd612297d3c163dfe11bead42255/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-04T15:52:22,767 INFO [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 2024-12-04T15:52:22,768 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6a40cd612297d3c163dfe11bead42255: Waiting for close lock at 1733327542737Running coprocessor pre-close hooks at 1733327542738 (+1 ms)Disabling compacts and flushes for region at 1733327542738Disabling writes for close at 1733327542738Writing region close event to WAL at 1733327542741 (+3 ms)Running coprocessor post-close hooks at 1733327542767 (+26 ms)Closed at 1733327542767 2024-12-04T15:52:22,768 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255. 
2024-12-04T15:52:22,773 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/.tmp/info/e7c6db6338e14df3bf013e6ce4bcfd8d is 153, key is TestHBaseWalOnEC,,1733327541399.6a40cd612297d3c163dfe11bead42255./info:regioninfo/1733327541843/Put/seqid=0 2024-12-04T15:52:22,776 WARN [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:22,777 WARN [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:22,781 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-78944950_22 at /127.0.0.1:36960 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:45289:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36960 dst: /127.0.0.1:45289 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:22,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-04T15:52:22,786 WARN [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-04T15:52:22,786 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/.tmp/info/e7c6db6338e14df3bf013e6ce4bcfd8d 2024-12-04T15:52:22,813 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/.tmp/ns/31c7ead0231849b3b50bdf2fc9e252b3 is 43, key is default/ns:d/1733327541125/Put/seqid=0 2024-12-04T15:52:22,815 WARN [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:22,816 WARN [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:22,820 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-78944950_22 at /127.0.0.1:36974 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:45289:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36974 dst: /127.0.0.1:45289 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:22,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-04T15:52:22,824 WARN [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-04T15:52:22,824 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/.tmp/ns/31c7ead0231849b3b50bdf2fc9e252b3 2024-12-04T15:52:22,850 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/.tmp/table/9b906a9b8106470a93523c5af88e21cc is 52, key is TestHBaseWalOnEC/table:state/1733327541859/Put/seqid=0 2024-12-04T15:52:22,852 WARN [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:22,852 WARN [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:22,855 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-78944950_22 at /127.0.0.1:41388 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:33405:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41388 dst: /127.0.0.1:33405 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
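The recurring DFSStripedOutputStream warnings and DataXceiver "Premature EOF" errors above are expected on this cluster: the RS-3-2-1024k policy needs 3 data plus 2 parity blocks on distinct datanodes, but the test minicluster runs only 3 datanodes, so parity blocks at index 3 and 4 have nowhere to go. Below is a minimal sketch of the kind of check that 'hdfs ec -verifyClusterSetup' automates, written against the public DistributedFileSystem API; the NameNode URI and the directory path are placeholders, not values from this log.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

    public class EcCapacityCheck {
      public static void main(String[] args) throws Exception {
        // Placeholder NameNode address; the test above binds an ephemeral port (hdfs://localhost:34217).
        DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(URI.create("hdfs://localhost:8020"), new Configuration());

        Path dir = new Path("/hbase");   // placeholder for the directory being written
        ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
        if (policy == null) {
          System.out.println(dir + " uses replication, no EC policy to verify");
          return;
        }

        int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
        DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
        System.out.printf("policy %s needs %d datanodes, %d are live%n",
            policy.getName(), needed, live.length);
        if (live.length < needed) {
          // This is the situation in the log: 3 live datanodes for a 3+2 policy,
          // hence the "Cannot allocate parity block(index=3..4)" warnings.
          System.out.println("cluster cannot place all members of a block group");
        }
      }
    }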
2024-12-04T15:52:22,860 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:22,860 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43197-0x1017e2d30160001, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:22,860 INFO [RS:0;a21b6491b371:43197 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T15:52:22,860 INFO [RS:0;a21b6491b371:43197 {}] regionserver.HRegionServer(1031): Exiting; stopping=a21b6491b371,43197,1733327539032; zookeeper connection closed. 2024-12-04T15:52:22,861 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5bd9626 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5bd9626 2024-12-04T15:52:22,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-04T15:52:22,938 INFO [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(976): stopping server a21b6491b371,39883,1733327539139; all regions closed. 2024-12-04T15:52:22,938 DEBUG [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-04T15:52:22,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41925 is added to blk_1073741826_1016 (size=1298) 2024-12-04T15:52:22,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_1073741826_1016 (size=1298) 2024-12-04T15:52:22,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741826_1016 (size=1298) 2024-12-04T15:52:22,946 DEBUG [RS:1;a21b6491b371:39883 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/oldWALs 2024-12-04T15:52:22,946 INFO [RS:1;a21b6491b371:39883 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a21b6491b371%2C39883%2C1733327539139:(num 1733327540497) 2024-12-04T15:52:22,946 DEBUG [RS:1;a21b6491b371:39883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:22,946 INFO [RS:1;a21b6491b371:39883 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T15:52:22,947 INFO [RS:1;a21b6491b371:39883 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T15:52:22,947 INFO [RS:1;a21b6491b371:39883 {}] hbase.ChoreService(370): Chore service for: regionserver/a21b6491b371:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T15:52:22,947 INFO [RS:1;a21b6491b371:39883 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T15:52:22,947 INFO [regionserver/a21b6491b371:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T15:52:22,947 INFO [RS:1;a21b6491b371:39883 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T15:52:22,947 INFO [RS:1;a21b6491b371:39883 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-04T15:52:22,947 INFO [RS:1;a21b6491b371:39883 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T15:52:22,948 INFO [RS:1;a21b6491b371:39883 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39883 2024-12-04T15:52:22,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a21b6491b371,39883,1733327539139 2024-12-04T15:52:22,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T15:52:22,950 INFO [RS:1;a21b6491b371:39883 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T15:52:22,952 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a21b6491b371,39883,1733327539139] 2024-12-04T15:52:22,953 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a21b6491b371,39883,1733327539139 already deleted, retry=false 2024-12-04T15:52:22,953 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a21b6491b371,39883,1733327539139 expired; onlineServers=1 2024-12-04T15:52:23,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:23,052 INFO [RS:1;a21b6491b371:39883 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T15:52:23,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39883-0x1017e2d30160002, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:23,052 INFO [RS:1;a21b6491b371:39883 {}] regionserver.HRegionServer(1031): Exiting; stopping=a21b6491b371,39883,1733327539139; zookeeper connection closed. 
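The ZKWatcher and RegionServerTracker lines above show how the master learns that a regionserver is gone: the regionserver's ephemeral znode under /hbase/rs disappears when its ZooKeeper session closes, a NodeDeleted event fires, and expiration processing starts. A bare-bones sketch of that watch pattern with the plain ZooKeeper client follows; the quorum string and znode path are copied from the log purely for illustration, and the session timeout is arbitrary.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher.Event.EventType;
    import org.apache.zookeeper.ZooKeeper;

    public class RsEphemeralNodeWatch {
      public static void main(String[] args) throws Exception {
        CountDownLatch deleted = new CountDownLatch(1);

        // Quorum as printed by ZKWatcher above; the 30s session timeout is illustrative.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64574", 30_000, event -> {
          // The watcher also sees connection-state events (type None); react only to deletions.
          if (event.getType() == EventType.NodeDeleted) {
            System.out.println("ephemeral node deleted: " + event.getPath());
            deleted.countDown();
          }
        });

        // watch=true registers a one-shot watch on the regionserver's ephemeral znode;
        // ZooKeeper removes the znode when the regionserver's session ends.
        zk.exists("/hbase/rs/a21b6491b371,39883,1733327539139", true);
        deleted.await();   // returns once NodeDeleted is delivered, mirroring the log sequence
        zk.close();
      }
    }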
2024-12-04T15:52:23,052 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2c2cc2b5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2c2cc2b5 2024-12-04T15:52:23,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-04T15:52:23,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41925 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-04T15:52:23,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-04T15:52:23,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41925 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-04T15:52:23,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-04T15:52:23,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41925 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-04T15:52:23,139 DEBUG [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-04T15:52:23,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-04T15:52:23,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-04T15:52:23,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-04T15:52:23,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41925 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-04T15:52:23,266 WARN [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-04T15:52:23,266 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/.tmp/table/9b906a9b8106470a93523c5af88e21cc 2024-12-04T15:52:23,277 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/.tmp/info/e7c6db6338e14df3bf013e6ce4bcfd8d as hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/info/e7c6db6338e14df3bf013e6ce4bcfd8d 2024-12-04T15:52:23,286 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/info/e7c6db6338e14df3bf013e6ce4bcfd8d, entries=10, sequenceid=11, filesize=6.5 K 2024-12-04T15:52:23,288 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/.tmp/ns/31c7ead0231849b3b50bdf2fc9e252b3 as hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/ns/31c7ead0231849b3b50bdf2fc9e252b3 2024-12-04T15:52:23,296 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/ns/31c7ead0231849b3b50bdf2fc9e252b3, entries=2, sequenceid=11, filesize=5.0 K 2024-12-04T15:52:23,297 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/.tmp/table/9b906a9b8106470a93523c5af88e21cc as hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/table/9b906a9b8106470a93523c5af88e21cc 2024-12-04T15:52:23,305 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/table/9b906a9b8106470a93523c5af88e21cc, entries=2, sequenceid=11, filesize=5.1 K 2024-12-04T15:52:23,307 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 568ms, sequenceid=11, compaction requested=false 2024-12-04T15:52:23,307 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-04T15:52:23,316 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, 
maxSeqId=1 2024-12-04T15:52:23,317 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T15:52:23,317 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T15:52:23,317 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733327542738Running coprocessor pre-close hooks at 1733327542738Disabling compacts and flushes for region at 1733327542738Disabling writes for close at 1733327542739 (+1 ms)Obtaining lock to block concurrent updates at 1733327542739Preparing flush snapshotting stores in 1588230740 at 1733327542739Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733327542740 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733327542741 (+1 ms)Flushing 1588230740/info: creating writer at 1733327542741Flushing 1588230740/info: appending metadata at 1733327542770 (+29 ms)Flushing 1588230740/info: closing flushed file at 1733327542770Flushing 1588230740/ns: creating writer at 1733327542795 (+25 ms)Flushing 1588230740/ns: appending metadata at 1733327542812 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733327542813 (+1 ms)Flushing 1588230740/table: creating writer at 1733327542833 (+20 ms)Flushing 1588230740/table: appending metadata at 1733327542849 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733327542849Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4844017: reopening flushed file at 1733327543275 (+426 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fd0dab1: reopening flushed file at 1733327543287 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c0572da: reopening flushed file at 1733327543296 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 568ms, sequenceid=11, compaction requested=false at 1733327543307 (+11 ms)Writing region close event to WAL at 1733327543309 (+2 ms)Running coprocessor post-close hooks at 1733327543316 (+7 ms)Closed at 1733327543317 (+1 ms) 2024-12-04T15:52:23,317 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T15:52:23,339 INFO [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(976): stopping server a21b6491b371,35881,1733327539185; all regions closed. 
2024-12-04T15:52:23,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41925 is added to blk_1073741829_1019 (size=2751) 2024-12-04T15:52:23,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_1073741829_1019 (size=2751) 2024-12-04T15:52:23,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741829_1019 (size=2751) 2024-12-04T15:52:23,346 DEBUG [RS:2;a21b6491b371:35881 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/oldWALs 2024-12-04T15:52:23,346 INFO [RS:2;a21b6491b371:35881 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a21b6491b371%2C35881%2C1733327539185.meta:.meta(num 1733327540968) 2024-12-04T15:52:23,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_1073741828_1018 (size=93) 2024-12-04T15:52:23,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41925 is added to blk_1073741828_1018 (size=93) 2024-12-04T15:52:23,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741828_1018 (size=93) 2024-12-04T15:52:23,352 DEBUG [RS:2;a21b6491b371:35881 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/oldWALs 2024-12-04T15:52:23,352 INFO [RS:2;a21b6491b371:35881 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a21b6491b371%2C35881%2C1733327539185:(num 1733327540496) 2024-12-04T15:52:23,352 DEBUG [RS:2;a21b6491b371:35881 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:23,352 INFO [RS:2;a21b6491b371:35881 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T15:52:23,352 INFO [RS:2;a21b6491b371:35881 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T15:52:23,353 INFO [RS:2;a21b6491b371:35881 {}] hbase.ChoreService(370): Chore service for: regionserver/a21b6491b371:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T15:52:23,353 INFO [RS:2;a21b6491b371:35881 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T15:52:23,353 INFO [regionserver/a21b6491b371:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T15:52:23,353 INFO [RS:2;a21b6491b371:35881 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35881 2024-12-04T15:52:23,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T15:52:23,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a21b6491b371,35881,1733327539185 2024-12-04T15:52:23,357 INFO [RS:2;a21b6491b371:35881 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T15:52:23,358 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a21b6491b371,35881,1733327539185] 2024-12-04T15:52:23,359 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a21b6491b371,35881,1733327539185 already deleted, retry=false 2024-12-04T15:52:23,359 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a21b6491b371,35881,1733327539185 expired; onlineServers=0 2024-12-04T15:52:23,359 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a21b6491b371,46261,1733327538339' ***** 2024-12-04T15:52:23,359 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T15:52:23,359 INFO [M:0;a21b6491b371:46261 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T15:52:23,360 INFO [M:0;a21b6491b371:46261 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T15:52:23,360 DEBUG [M:0;a21b6491b371:46261 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T15:52:23,360 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-04T15:52:23,360 DEBUG [M:0;a21b6491b371:46261 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T15:52:23,360 DEBUG [master/a21b6491b371:0:becomeActiveMaster-HFileCleaner.large.0-1733327540147 {}] cleaner.HFileCleaner(306): Exit Thread[master/a21b6491b371:0:becomeActiveMaster-HFileCleaner.large.0-1733327540147,5,FailOnTimeoutGroup] 2024-12-04T15:52:23,360 DEBUG [master/a21b6491b371:0:becomeActiveMaster-HFileCleaner.small.0-1733327540149 {}] cleaner.HFileCleaner(306): Exit Thread[master/a21b6491b371:0:becomeActiveMaster-HFileCleaner.small.0-1733327540149,5,FailOnTimeoutGroup] 2024-12-04T15:52:23,360 INFO [M:0;a21b6491b371:46261 {}] hbase.ChoreService(370): Chore service for: master/a21b6491b371:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T15:52:23,360 INFO [M:0;a21b6491b371:46261 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T15:52:23,361 DEBUG [M:0;a21b6491b371:46261 {}] master.HMaster(1795): Stopping service threads 2024-12-04T15:52:23,361 INFO [M:0;a21b6491b371:46261 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T15:52:23,361 INFO [M:0;a21b6491b371:46261 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T15:52:23,361 INFO [M:0;a21b6491b371:46261 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T15:52:23,361 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-04T15:52:23,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T15:52:23,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:23,362 DEBUG [M:0;a21b6491b371:46261 {}] zookeeper.ZKUtil(347): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T15:52:23,362 WARN [M:0;a21b6491b371:46261 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T15:52:23,363 INFO [M:0;a21b6491b371:46261 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/.lastflushedseqids 2024-12-04T15:52:23,372 WARN [M:0;a21b6491b371:46261 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:23,373 WARN [M:0;a21b6491b371:46261 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
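The same pair of parity-allocation warnings shows up again as the master writes .lastflushedseqids, since every striped write on this 3-datanode cluster is two placement targets short for RS-3-2-1024k. If the warnings were not the point of the test, the usual ways out are either a policy the cluster can satisfy or plain replication; a hedged sketch using the public DistributedFileSystem API is below. The NameNode URI and directory are placeholders, and XOR-2-1-1024k (2 data plus 1 parity, so it fits on 3 datanodes) is a built-in policy that may first need enabling, for example with 'hdfs ec -enablePolicy'.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class FitEcPolicyToCluster {
      public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(URI.create("hdfs://localhost:8020"), new Configuration());
        Path hbaseRoot = new Path("/hbase");   // placeholder; this test writes under a per-run test-data dir

        // Option 1: switch the directory to a policy three datanodes can hold.
        // Only files created after this call pick up the new layout.
        dfs.setErasureCodingPolicy(hbaseRoot, "XOR-2-1-1024k");

        // Option 2: drop erasure coding here and let new files fall back to replication.
        // dfs.unsetErasureCodingPolicy(hbaseRoot);
      }
    }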
2024-12-04T15:52:23,375 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-780551652_22 at /127.0.0.1:37018 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:45289:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37018 dst: /127.0.0.1:45289 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:23,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-04T15:52:23,380 WARN [M:0;a21b6491b371:46261 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-04T15:52:23,380 INFO [M:0;a21b6491b371:46261 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T15:52:23,380 INFO [M:0;a21b6491b371:46261 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T15:52:23,380 DEBUG [M:0;a21b6491b371:46261 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T15:52:23,380 INFO [M:0;a21b6491b371:46261 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:52:23,380 DEBUG [M:0;a21b6491b371:46261 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:52:23,380 DEBUG [M:0;a21b6491b371:46261 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T15:52:23,380 DEBUG [M:0;a21b6491b371:46261 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T15:52:23,381 INFO [M:0;a21b6491b371:46261 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-04T15:52:23,400 DEBUG [M:0;a21b6491b371:46261 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ac8cce98f6e74a9c8f4545a5a0609838 is 82, key is hbase:meta,,1/info:regioninfo/1733327541055/Put/seqid=0 2024-12-04T15:52:23,402 WARN [M:0;a21b6491b371:46261 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:23,402 WARN [M:0;a21b6491b371:46261 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:23,404 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-780551652_22 at /127.0.0.1:41454 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:33405:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41454 dst: /127.0.0.1:33405 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:23,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-04T15:52:23,408 WARN [M:0;a21b6491b371:46261 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-04T15:52:23,409 INFO [M:0;a21b6491b371:46261 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ac8cce98f6e74a9c8f4545a5a0609838 2024-12-04T15:52:23,432 DEBUG [M:0;a21b6491b371:46261 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a74ecf4e0d614f3eb1020ee1e87f7029 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733327541865/Put/seqid=0 2024-12-04T15:52:23,434 WARN [M:0;a21b6491b371:46261 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:23,434 WARN [M:0;a21b6491b371:46261 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:23,437 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-780551652_22 at /127.0.0.1:41486 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:33405:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41486 dst: /127.0.0.1:33405 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:23,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_-9223372036854775552_1037 (size=6438) 2024-12-04T15:52:23,442 WARN [M:0;a21b6491b371:46261 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-04T15:52:23,442 INFO [M:0;a21b6491b371:46261 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a74ecf4e0d614f3eb1020ee1e87f7029 2024-12-04T15:52:23,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:23,458 INFO [RS:2;a21b6491b371:35881 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T15:52:23,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35881-0x1017e2d30160003, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:23,458 INFO [RS:2;a21b6491b371:35881 {}] regionserver.HRegionServer(1031): Exiting; stopping=a21b6491b371,35881,1733327539185; zookeeper connection closed. 2024-12-04T15:52:23,459 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5885cb66 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5885cb66 2024-12-04T15:52:23,459 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-04T15:52:23,466 DEBUG [M:0;a21b6491b371:46261 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/76fcf781b9024e35a8c5172a50d6c0e9 is 69, key is a21b6491b371,35881,1733327539185/rs:state/1733327540250/Put/seqid=0 2024-12-04T15:52:23,468 WARN [M:0;a21b6491b371:46261 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:23,468 WARN [M:0;a21b6491b371:46261 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-04T15:52:23,471 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-780551652_22 at /127.0.0.1:41506 [Receiving block BP-818689816-172.17.0.2-1733327535191:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:33405:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41506 dst: /127.0.0.1:33405 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T15:52:23,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-04T15:52:23,475 WARN [M:0;a21b6491b371:46261 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-04T15:52:23,475 INFO [M:0;a21b6491b371:46261 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/76fcf781b9024e35a8c5172a50d6c0e9 2024-12-04T15:52:23,486 DEBUG [M:0;a21b6491b371:46261 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ac8cce98f6e74a9c8f4545a5a0609838 as hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ac8cce98f6e74a9c8f4545a5a0609838 2024-12-04T15:52:23,494 INFO [M:0;a21b6491b371:46261 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ac8cce98f6e74a9c8f4545a5a0609838, entries=8, sequenceid=72, filesize=5.5 K 2024-12-04T15:52:23,496 DEBUG [M:0;a21b6491b371:46261 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a74ecf4e0d614f3eb1020ee1e87f7029 as hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a74ecf4e0d614f3eb1020ee1e87f7029 2024-12-04T15:52:23,503 INFO [M:0;a21b6491b371:46261 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a74ecf4e0d614f3eb1020ee1e87f7029, entries=8, sequenceid=72, filesize=6.3 K 2024-12-04T15:52:23,504 DEBUG [M:0;a21b6491b371:46261 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/76fcf781b9024e35a8c5172a50d6c0e9 as hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/76fcf781b9024e35a8c5172a50d6c0e9 2024-12-04T15:52:23,511 INFO [M:0;a21b6491b371:46261 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/76fcf781b9024e35a8c5172a50d6c0e9, entries=3, sequenceid=72, filesize=5.2 K 2024-12-04T15:52:23,513 INFO [M:0;a21b6491b371:46261 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=72, compaction requested=false 2024-12-04T15:52:23,514 INFO [M:0;a21b6491b371:46261 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:52:23,514 DEBUG [M:0;a21b6491b371:46261 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733327543380Disabling compacts and flushes for region at 1733327543380Disabling writes for close at 1733327543380Obtaining lock to block concurrent updates at 1733327543381 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733327543381Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27462, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733327543381Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733327543383 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733327543383Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733327543399 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733327543399Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733327543416 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733327543432 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733327543432Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733327543450 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733327543466 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733327543466Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b52a572: reopening flushed file at 1733327543484 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b36276e: reopening flushed file at 1733327543495 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a5b63eb: reopening flushed file at 1733327543503 (+8 ms)Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=72, compaction requested=false at 1733327543513 (+10 ms)Writing region close event to WAL at 1733327543514 (+1 ms)Closed at 1733327543514 2024-12-04T15:52:23,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45289 is added to blk_1073741825_1011 (size=32665) 2024-12-04T15:52:23,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41925 is added to blk_1073741825_1011 (size=32665) 2024-12-04T15:52:23,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33405 is added to blk_1073741825_1011 (size=32665) 2024-12-04T15:52:23,518 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T15:52:23,518 INFO [M:0;a21b6491b371:46261 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-04T15:52:23,518 INFO [M:0;a21b6491b371:46261 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46261 2024-12-04T15:52:23,519 INFO [M:0;a21b6491b371:46261 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T15:52:23,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:23,622 INFO [M:0;a21b6491b371:46261 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T15:52:23,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46261-0x1017e2d30160000, quorum=127.0.0.1:64574, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:23,627 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3297a183{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T15:52:23,629 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b8a83a2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T15:52:23,629 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T15:52:23,629 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17c0da3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T15:52:23,629 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@656f7043{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/hadoop.log.dir/,STOPPED} 2024-12-04T15:52:23,632 WARN [BP-818689816-172.17.0.2-1733327535191 heartbeating to localhost/127.0.0.1:34217 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T15:52:23,632 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T15:52:23,632 WARN [BP-818689816-172.17.0.2-1733327535191 heartbeating to localhost/127.0.0.1:34217 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-818689816-172.17.0.2-1733327535191 (Datanode Uuid 2051eead-5a6f-4a1d-80be-5deabe2a7a00) service to localhost/127.0.0.1:34217 2024-12-04T15:52:23,632 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T15:52:23,633 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/data/data5/current/BP-818689816-172.17.0.2-1733327535191 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T15:52:23,633 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/data/data6/current/BP-818689816-172.17.0.2-1733327535191 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T15:52:23,634 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T15:52:23,637 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1548acd1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T15:52:23,637 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3dc20694{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T15:52:23,637 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T15:52:23,637 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fb4f3a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T15:52:23,637 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37e44dc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/hadoop.log.dir/,STOPPED} 2024-12-04T15:52:23,639 WARN [BP-818689816-172.17.0.2-1733327535191 heartbeating to localhost/127.0.0.1:34217 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T15:52:23,639 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T15:52:23,639 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T15:52:23,639 WARN [BP-818689816-172.17.0.2-1733327535191 heartbeating to localhost/127.0.0.1:34217 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-818689816-172.17.0.2-1733327535191 (Datanode Uuid 8731965f-42c8-41c6-be10-91d3c09948c5) service to localhost/127.0.0.1:34217 2024-12-04T15:52:23,639 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/data/data3/current/BP-818689816-172.17.0.2-1733327535191 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T15:52:23,639 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/data/data4/current/BP-818689816-172.17.0.2-1733327535191 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T15:52:23,640 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T15:52:23,642 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d1a7cf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T15:52:23,642 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20b70ca3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T15:52:23,642 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T15:52:23,642 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54f91ad6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T15:52:23,642 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d0819de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/hadoop.log.dir/,STOPPED} 2024-12-04T15:52:23,643 WARN [BP-818689816-172.17.0.2-1733327535191 heartbeating to localhost/127.0.0.1:34217 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T15:52:23,643 WARN [BP-818689816-172.17.0.2-1733327535191 heartbeating to localhost/127.0.0.1:34217 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-818689816-172.17.0.2-1733327535191 (Datanode Uuid 7ad2d57b-5f62-4e42-9464-cd1f2452152c) service to localhost/127.0.0.1:34217 2024-12-04T15:52:23,643 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T15:52:23,643 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T15:52:23,644 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/data/data1/current/BP-818689816-172.17.0.2-1733327535191 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T15:52:23,644 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/cluster_74f99ff0-1fe5-798c-8e29-4b76516def66/data/data2/current/BP-818689816-172.17.0.2-1733327535191 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T15:52:23,644 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T15:52:23,653 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ea2dca6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T15:52:23,653 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ed9b238{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T15:52:23,653 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T15:52:23,653 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@410292bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T15:52:23,654 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27f57d6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/hadoop.log.dir/,STOPPED} 2024-12-04T15:52:23,662 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-04T15:52:23,691 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-04T15:52:23,699 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=90 (was 160), OpenFileDescriptor=439 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=181 (was 162) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=9576 (was 9914) 2024-12-04T15:52:23,706 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=90, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=181, ProcessCount=11, AvailableMemoryMB=9576 2024-12-04T15:52:23,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T15:52:23,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/hadoop.log.dir so I do NOT create it in target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2 2024-12-04T15:52:23,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b9e586b2-8d13-7277-bd27-636735ef8d2a/hadoop.tmp.dir so I do NOT create it in target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2 2024-12-04T15:52:23,706 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b, deleteOnExit=true 2024-12-04T15:52:23,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T15:52:23,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/test.cache.data in system properties and HBase conf 2024-12-04T15:52:23,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T15:52:23,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/hadoop.log.dir in system properties and HBase conf 2024-12-04T15:52:23,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T15:52:23,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T15:52:23,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T15:52:23,707 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-04T15:52:23,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T15:52:23,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T15:52:23,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T15:52:23,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T15:52:23,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T15:52:23,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T15:52:23,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T15:52:23,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T15:52:23,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T15:52:23,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/nfs.dump.dir in system properties and HBase conf 2024-12-04T15:52:23,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/java.io.tmpdir in system properties and HBase conf 2024-12-04T15:52:23,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T15:52:23,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T15:52:23,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T15:52:23,804 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T15:52:23,810 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T15:52:23,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T15:52:23,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T15:52:23,811 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T15:52:23,812 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T15:52:23,813 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@309f022b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/hadoop.log.dir/,AVAILABLE} 2024-12-04T15:52:23,813 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40d3d129{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T15:52:23,929 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@22524515{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/java.io.tmpdir/jetty-localhost-33735-hadoop-hdfs-3_4_1-tests_jar-_-any-14648020138708539685/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T15:52:23,929 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c2ad186{HTTP/1.1, (http/1.1)}{localhost:33735} 2024-12-04T15:52:23,930 INFO [Time-limited test {}] server.Server(415): Started @10633ms 2024-12-04T15:52:24,015 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T15:52:24,019 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T15:52:24,020 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T15:52:24,020 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T15:52:24,020 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T15:52:24,021 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cec6057{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/hadoop.log.dir/,AVAILABLE} 2024-12-04T15:52:24,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39d5f2c6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T15:52:24,139 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@29e2a23f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/java.io.tmpdir/jetty-localhost-33657-hadoop-hdfs-3_4_1-tests_jar-_-any-16532568691884522478/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T15:52:24,139 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@4438aaf{HTTP/1.1, (http/1.1)}{localhost:33657} 2024-12-04T15:52:24,139 INFO [Time-limited test {}] server.Server(415): Started @10842ms 2024-12-04T15:52:24,141 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T15:52:24,179 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T15:52:24,183 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T15:52:24,184 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T15:52:24,184 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T15:52:24,184 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T15:52:24,185 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47448499{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/hadoop.log.dir/,AVAILABLE} 2024-12-04T15:52:24,185 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cb78ad1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T15:52:24,253 WARN [Thread-526 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/data/data1/current/BP-656230515-172.17.0.2-1733327543740/current, will proceed with Du for space computation calculation, 2024-12-04T15:52:24,254 WARN [Thread-527 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/data/data2/current/BP-656230515-172.17.0.2-1733327543740/current, will proceed with Du for space computation calculation, 2024-12-04T15:52:24,272 WARN [Thread-505 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T15:52:24,275 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x318e5a7de2b8021a with lease ID 0xc91e508c27faa518: Processing first storage report for DS-fbba735b-d7af-49e4-b748-0c186340c9cb from datanode DatanodeRegistration(127.0.0.1:44907, datanodeUuid=a28e23c7-0f19-40ec-a840-0e817ce154a5, infoPort=41509, infoSecurePort=0, ipcPort=38133, storageInfo=lv=-57;cid=testClusterID;nsid=461759785;c=1733327543740) 2024-12-04T15:52:24,275 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x318e5a7de2b8021a with lease ID 0xc91e508c27faa518: from storage DS-fbba735b-d7af-49e4-b748-0c186340c9cb node DatanodeRegistration(127.0.0.1:44907, datanodeUuid=a28e23c7-0f19-40ec-a840-0e817ce154a5, infoPort=41509, infoSecurePort=0, ipcPort=38133, storageInfo=lv=-57;cid=testClusterID;nsid=461759785;c=1733327543740), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T15:52:24,275 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x318e5a7de2b8021a with lease ID 0xc91e508c27faa518: Processing first storage report for DS-734a5e65-0f64-4a0c-a67f-4e4d11d79493 from datanode DatanodeRegistration(127.0.0.1:44907, datanodeUuid=a28e23c7-0f19-40ec-a840-0e817ce154a5, infoPort=41509, infoSecurePort=0, ipcPort=38133, storageInfo=lv=-57;cid=testClusterID;nsid=461759785;c=1733327543740) 2024-12-04T15:52:24,276 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x318e5a7de2b8021a with lease ID 0xc91e508c27faa518: from storage DS-734a5e65-0f64-4a0c-a67f-4e4d11d79493 node DatanodeRegistration(127.0.0.1:44907, datanodeUuid=a28e23c7-0f19-40ec-a840-0e817ce154a5, infoPort=41509, infoSecurePort=0, ipcPort=38133, storageInfo=lv=-57;cid=testClusterID;nsid=461759785;c=1733327543740), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T15:52:24,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b6c5f5d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/java.io.tmpdir/jetty-localhost-35619-hadoop-hdfs-3_4_1-tests_jar-_-any-5375309724933643259/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T15:52:24,303 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@62ba6a25{HTTP/1.1, (http/1.1)}{localhost:35619} 2024-12-04T15:52:24,303 INFO [Time-limited test {}] server.Server(415): Started @11006ms 2024-12-04T15:52:24,304 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T15:52:24,334 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T15:52:24,337 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T15:52:24,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T15:52:24,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T15:52:24,338 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T15:52:24,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a9a00ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/hadoop.log.dir/,AVAILABLE} 2024-12-04T15:52:24,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23f0b9cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T15:52:24,407 WARN [Thread-562 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/data/data4/current/BP-656230515-172.17.0.2-1733327543740/current, will proceed with Du for space computation calculation, 2024-12-04T15:52:24,407 WARN [Thread-561 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/data/data3/current/BP-656230515-172.17.0.2-1733327543740/current, will proceed with Du for space computation calculation, 2024-12-04T15:52:24,430 WARN [Thread-541 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T15:52:24,433 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ddbb3a24630339d with lease ID 0xc91e508c27faa519: Processing first storage report for DS-c3727f5f-4994-43f9-a3f9-7e2f98cdd66f from datanode DatanodeRegistration(127.0.0.1:44125, datanodeUuid=a7ed0b89-1885-4902-861f-0903e48f53fb, infoPort=42293, infoSecurePort=0, ipcPort=38031, storageInfo=lv=-57;cid=testClusterID;nsid=461759785;c=1733327543740) 2024-12-04T15:52:24,433 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ddbb3a24630339d with lease ID 0xc91e508c27faa519: from storage DS-c3727f5f-4994-43f9-a3f9-7e2f98cdd66f node DatanodeRegistration(127.0.0.1:44125, datanodeUuid=a7ed0b89-1885-4902-861f-0903e48f53fb, infoPort=42293, infoSecurePort=0, ipcPort=38031, storageInfo=lv=-57;cid=testClusterID;nsid=461759785;c=1733327543740), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T15:52:24,434 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ddbb3a24630339d with lease ID 0xc91e508c27faa519: Processing first storage report for DS-a010d955-3481-48aa-8bd3-3b05044620da from datanode DatanodeRegistration(127.0.0.1:44125, datanodeUuid=a7ed0b89-1885-4902-861f-0903e48f53fb, infoPort=42293, infoSecurePort=0, ipcPort=38031, storageInfo=lv=-57;cid=testClusterID;nsid=461759785;c=1733327543740) 2024-12-04T15:52:24,434 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ddbb3a24630339d with lease ID 0xc91e508c27faa519: from storage DS-a010d955-3481-48aa-8bd3-3b05044620da node DatanodeRegistration(127.0.0.1:44125, datanodeUuid=a7ed0b89-1885-4902-861f-0903e48f53fb, infoPort=42293, infoSecurePort=0, ipcPort=38031, storageInfo=lv=-57;cid=testClusterID;nsid=461759785;c=1733327543740), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-04T15:52:24,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49ebd318{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/java.io.tmpdir/jetty-localhost-39187-hadoop-hdfs-3_4_1-tests_jar-_-any-2160204530250090654/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T15:52:24,456 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5298b047{HTTP/1.1, (http/1.1)}{localhost:39187} 2024-12-04T15:52:24,456 INFO [Time-limited test {}] server.Server(415): Started @11159ms 2024-12-04T15:52:24,458 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
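The entries above show HBaseTestingUtil bringing the mini DFS cluster back up for TestHBaseWalOnEC#testReadWrite[1]: one namenode Jetty instance plus three datanode instances on localhost, matching the logged StartMiniClusterOption (numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1). Purely as a hedged illustration — the class and builder names are assumed from the HBaseTestingUtil and StartMiniClusterOption identifiers printed in the log, not copied from the actual test source — a test would typically drive this startup, and the later "Minicluster is down" teardown, roughly like this:

    // Illustrative sketch only; API names are assumed from the identifiers in the log,
    // not taken from TestHBaseWalOnEC itself.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors the logged option: 1 master, 3 region servers, 3 datanodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // produces the DFS/Jetty/ZooKeeper startup entries seen above
        try {
          // ... test body: write WAL edits and read them back ...
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" entry seen earlier
        }
      }
    }

The ResourceChecker "before:"/"after:" lines that bracket each parameterized run are emitted by the test framework around exactly this start/stop cycle.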
2024-12-04T15:52:24,552 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/data/data6/current/BP-656230515-172.17.0.2-1733327543740/current, will proceed with Du for space computation calculation, 2024-12-04T15:52:24,552 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/data/data5/current/BP-656230515-172.17.0.2-1733327543740/current, will proceed with Du for space computation calculation, 2024-12-04T15:52:24,575 WARN [Thread-576 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T15:52:24,578 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x744fd4892dd76487 with lease ID 0xc91e508c27faa51a: Processing first storage report for DS-9f71f78c-f932-461c-96fd-898d6e98ae56 from datanode DatanodeRegistration(127.0.0.1:33377, datanodeUuid=d113f785-32c4-438a-adec-08bdbf381286, infoPort=38521, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=461759785;c=1733327543740) 2024-12-04T15:52:24,578 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x744fd4892dd76487 with lease ID 0xc91e508c27faa51a: from storage DS-9f71f78c-f932-461c-96fd-898d6e98ae56 node DatanodeRegistration(127.0.0.1:33377, datanodeUuid=d113f785-32c4-438a-adec-08bdbf381286, infoPort=38521, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=461759785;c=1733327543740), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T15:52:24,578 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x744fd4892dd76487 with lease ID 0xc91e508c27faa51a: Processing first storage report for DS-5185359b-edeb-426d-b1af-add6aeae145e from datanode DatanodeRegistration(127.0.0.1:33377, datanodeUuid=d113f785-32c4-438a-adec-08bdbf381286, infoPort=38521, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=461759785;c=1733327543740) 2024-12-04T15:52:24,578 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x744fd4892dd76487 with lease ID 0xc91e508c27faa51a: from storage DS-5185359b-edeb-426d-b1af-add6aeae145e node DatanodeRegistration(127.0.0.1:33377, datanodeUuid=d113f785-32c4-438a-adec-08bdbf381286, infoPort=38521, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=461759785;c=1733327543740), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T15:52:24,582 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2 2024-12-04T15:52:24,585 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/zookeeper_0, clientPort=57411, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T15:52:24,586 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57411 2024-12-04T15:52:24,586 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:24,588 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:24,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741825_1001 (size=7) 2024-12-04T15:52:24,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741825_1001 (size=7) 2024-12-04T15:52:24,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741825_1001 (size=7) 2024-12-04T15:52:24,602 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37 with version=8 2024-12-04T15:52:24,602 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34217/user/jenkins/test-data/f13b5a61-6504-4cf8-1d52-bcda544fecc1/hbase-staging 2024-12-04T15:52:24,604 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a21b6491b371:0 server-side Connection retries=45 2024-12-04T15:52:24,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:24,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:24,604 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T15:52:24,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:24,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T15:52:24,604 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T15:52:24,604 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T15:52:24,605 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35791 2024-12-04T15:52:24,606 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35791 connecting to ZooKeeper ensemble=127.0.0.1:57411 2024-12-04T15:52:24,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:357910x0, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T15:52:24,612 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35791-0x1017e2d4b980000 connected 2024-12-04T15:52:24,628 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:24,629 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:24,632 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:24,632 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37, hbase.cluster.distributed=false 2024-12-04T15:52:24,634 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T15:52:24,634 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35791 2024-12-04T15:52:24,634 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35791 2024-12-04T15:52:24,635 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35791 2024-12-04T15:52:24,635 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35791 2024-12-04T15:52:24,635 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35791 2024-12-04T15:52:24,652 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a21b6491b371:0 server-side Connection retries=45 2024-12-04T15:52:24,652 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:24,652 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:24,652 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T15:52:24,652 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:24,652 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T15:52:24,652 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T15:52:24,653 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T15:52:24,653 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44623 2024-12-04T15:52:24,655 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44623 connecting to ZooKeeper ensemble=127.0.0.1:57411 2024-12-04T15:52:24,656 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:24,658 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:24,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446230x0, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T15:52:24,662 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44623-0x1017e2d4b980001 connected 2024-12-04T15:52:24,663 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:24,663 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T15:52:24,663 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T15:52:24,664 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T15:52:24,665 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T15:52:24,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44623 2024-12-04T15:52:24,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44623 2024-12-04T15:52:24,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44623 2024-12-04T15:52:24,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44623 2024-12-04T15:52:24,667 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44623 2024-12-04T15:52:24,683 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a21b6491b371:0 server-side Connection retries=45 2024-12-04T15:52:24,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:24,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:24,683 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T15:52:24,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:24,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T15:52:24,683 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T15:52:24,683 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T15:52:24,684 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38269 2024-12-04T15:52:24,685 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38269 connecting to ZooKeeper ensemble=127.0.0.1:57411 2024-12-04T15:52:24,686 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:24,688 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:24,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:382690x0, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T15:52:24,692 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:382690x0, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:24,692 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38269-0x1017e2d4b980002 connected 2024-12-04T15:52:24,692 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T15:52:24,693 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T15:52:24,693 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
2024-12-04T15:52:24,694 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T15:52:24,695 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38269 2024-12-04T15:52:24,695 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38269 2024-12-04T15:52:24,696 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38269 2024-12-04T15:52:24,697 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38269 2024-12-04T15:52:24,698 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38269 2024-12-04T15:52:24,712 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a21b6491b371:0 server-side Connection retries=45 2024-12-04T15:52:24,712 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:24,712 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:24,712 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T15:52:24,713 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T15:52:24,713 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T15:52:24,713 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T15:52:24,713 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T15:52:24,713 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41071 2024-12-04T15:52:24,715 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41071 connecting to ZooKeeper ensemble=127.0.0.1:57411 2024-12-04T15:52:24,715 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:24,717 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:24,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:410710x0, quorum=127.0.0.1:57411, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T15:52:24,722 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:24,722 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41071-0x1017e2d4b980003 connected 2024-12-04T15:52:24,723 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T15:52:24,723 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T15:52:24,724 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T15:52:24,725 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T15:52:24,726 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41071 2024-12-04T15:52:24,726 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41071 2024-12-04T15:52:24,726 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41071 2024-12-04T15:52:24,727 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41071 2024-12-04T15:52:24,727 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41071 2024-12-04T15:52:24,738 DEBUG [M:0;a21b6491b371:35791 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a21b6491b371:35791 2024-12-04T15:52:24,739 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a21b6491b371,35791,1733327544603 2024-12-04T15:52:24,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:24,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:24,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:24,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:24,741 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35791-0x1017e2d4b980000, 
quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a21b6491b371,35791,1733327544603 2024-12-04T15:52:24,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T15:52:24,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T15:52:24,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,743 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T15:52:24,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T15:52:24,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,744 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a21b6491b371,35791,1733327544603 from backup master directory 2024-12-04T15:52:24,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a21b6491b371,35791,1733327544603 2024-12-04T15:52:24,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:24,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:24,745 WARN [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-04T15:52:24,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:24,746 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a21b6491b371,35791,1733327544603 2024-12-04T15:52:24,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T15:52:24,752 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/hbase.id] with ID: d8becefd-4441-4a2a-b45c-65d3973a7037 2024-12-04T15:52:24,752 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/.tmp/hbase.id 2024-12-04T15:52:24,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741826_1002 (size=42) 2024-12-04T15:52:24,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741826_1002 (size=42) 2024-12-04T15:52:24,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741826_1002 (size=42) 2024-12-04T15:52:24,762 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/.tmp/hbase.id]:[hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/hbase.id] 2024-12-04T15:52:24,778 INFO [master/a21b6491b371:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T15:52:24,778 INFO [master/a21b6491b371:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T15:52:24,780 INFO [master/a21b6491b371:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-04T15:52:24,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741827_1003 (size=196) 2024-12-04T15:52:24,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741827_1003 (size=196) 2024-12-04T15:52:24,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741827_1003 (size=196) 2024-12-04T15:52:24,794 INFO [master/a21b6491b371:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:52:24,795 INFO [master/a21b6491b371:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T15:52:24,795 INFO [master/a21b6491b371:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T15:52:24,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is 
added to blk_1073741828_1004 (size=1189) 2024-12-04T15:52:24,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741828_1004 (size=1189) 2024-12-04T15:52:24,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741828_1004 (size=1189) 2024-12-04T15:52:24,808 INFO [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store 2024-12-04T15:52:24,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741829_1005 (size=34) 2024-12-04T15:52:24,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741829_1005 (size=34) 2024-12-04T15:52:24,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741829_1005 (size=34) 2024-12-04T15:52:24,818 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:52:24,818 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T15:52:24,818 INFO [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:52:24,818 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T15:52:24,818 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T15:52:24,818 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:52:24,818 INFO [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:52:24,818 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733327544818Disabling compacts and flushes for region at 1733327544818Disabling writes for close at 1733327544818Writing region close event to WAL at 1733327544818Closed at 1733327544818 2024-12-04T15:52:24,819 WARN [master/a21b6491b371:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/.initializing 2024-12-04T15:52:24,819 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/WALs/a21b6491b371,35791,1733327544603 2024-12-04T15:52:24,823 INFO [master/a21b6491b371:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a21b6491b371%2C35791%2C1733327544603, suffix=, logDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/WALs/a21b6491b371,35791,1733327544603, archiveDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/oldWALs, maxLogs=10 2024-12-04T15:52:24,824 INFO [master/a21b6491b371:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a21b6491b371%2C35791%2C1733327544603.1733327544823 2024-12-04T15:52:24,833 INFO [master/a21b6491b371:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/WALs/a21b6491b371,35791,1733327544603/a21b6491b371%2C35791%2C1733327544603.1733327544823 2024-12-04T15:52:24,834 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42293:42293),(127.0.0.1/127.0.0.1:38521:38521),(127.0.0.1/127.0.0.1:41509:41509)] 2024-12-04T15:52:24,835 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:52:24,835 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:52:24,835 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:24,835 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:24,837 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:24,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T15:52:24,839 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:24,840 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:24,840 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:24,841 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T15:52:24,841 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:24,842 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:52:24,842 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:24,844 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T15:52:24,844 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:24,845 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:52:24,845 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:24,847 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T15:52:24,847 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:24,847 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:52:24,847 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:24,848 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:24,849 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:24,850 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:24,850 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:24,851 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T15:52:24,852 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T15:52:24,855 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:52:24,856 INFO [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69043077, jitterRate=0.028822019696235657}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T15:52:24,857 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733327544836Initializing all the Stores at 1733327544837 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327544837Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733327544837Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733327544837Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733327544837Cleaning up temporary data from old regions at 1733327544850 (+13 ms)Region opened successfully at 1733327544857 (+7 ms) 2024-12-04T15:52:24,857 INFO [master/a21b6491b371:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T15:52:24,861 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@563e8060, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a21b6491b371/172.17.0.2:0 2024-12-04T15:52:24,862 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T15:52:24,863 INFO [master/a21b6491b371:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T15:52:24,863 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T15:52:24,863 INFO [master/a21b6491b371:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T15:52:24,863 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-04T15:52:24,864 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-04T15:52:24,864 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T15:52:24,866 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-04T15:52:24,867 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T15:52:24,868 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T15:52:24,868 INFO [master/a21b6491b371:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T15:52:24,869 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T15:52:24,870 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T15:52:24,870 INFO [master/a21b6491b371:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T15:52:24,871 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T15:52:24,872 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T15:52:24,873 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T15:52:24,875 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T15:52:24,877 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T15:52:24,878 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T15:52:24,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:24,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:24,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-04T15:52:24,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:24,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:24,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,881 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a21b6491b371,35791,1733327544603, sessionid=0x1017e2d4b980000, setting cluster-up flag (Was=false) 2024-12-04T15:52:24,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,889 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T15:52:24,890 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a21b6491b371,35791,1733327544603 2024-12-04T15:52:24,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:24,900 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T15:52:24,901 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a21b6491b371,35791,1733327544603 2024-12-04T15:52:24,903 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T15:52:24,905 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T15:52:24,906 INFO [master/a21b6491b371:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T15:52:24,906 INFO [master/a21b6491b371:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-04T15:52:24,906 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a21b6491b371,35791,1733327544603 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T15:52:24,907 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a21b6491b371:0, corePoolSize=5, maxPoolSize=5 2024-12-04T15:52:24,908 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a21b6491b371:0, corePoolSize=5, maxPoolSize=5 2024-12-04T15:52:24,908 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a21b6491b371:0, corePoolSize=5, maxPoolSize=5 2024-12-04T15:52:24,908 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a21b6491b371:0, corePoolSize=5, maxPoolSize=5 2024-12-04T15:52:24,908 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a21b6491b371:0, corePoolSize=10, maxPoolSize=10 2024-12-04T15:52:24,908 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:24,908 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a21b6491b371:0, corePoolSize=2, maxPoolSize=2 2024-12-04T15:52:24,908 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:24,912 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733327574912 2024-12-04T15:52:24,912 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T15:52:24,912 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T15:52:24,912 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T15:52:24,913 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T15:52:24,913 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T15:52:24,913 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T15:52:24,913 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:24,913 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T15:52:24,913 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T15:52:24,913 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T15:52:24,914 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T15:52:24,914 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T15:52:24,915 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:24,915 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T15:52:24,916 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T15:52:24,916 INFO [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T15:52:24,916 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a21b6491b371:0:becomeActiveMaster-HFileCleaner.large.0-1733327544916,5,FailOnTimeoutGroup] 2024-12-04T15:52:24,920 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/a21b6491b371:0:becomeActiveMaster-HFileCleaner.small.0-1733327544916,5,FailOnTimeoutGroup] 2024-12-04T15:52:24,920 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:24,920 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T15:52:24,920 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:24,920 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:24,930 INFO [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(746): ClusterId : d8becefd-4441-4a2a-b45c-65d3973a7037 2024-12-04T15:52:24,930 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T15:52:24,930 INFO [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(746): ClusterId : d8becefd-4441-4a2a-b45c-65d3973a7037 2024-12-04T15:52:24,930 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T15:52:24,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741831_1007 (size=1321) 2024-12-04T15:52:24,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741831_1007 (size=1321) 2024-12-04T15:52:24,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741831_1007 (size=1321) 2024-12-04T15:52:24,934 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T15:52:24,934 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T15:52:24,934 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T15:52:24,934 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T15:52:24,934 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T15:52:24,934 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37 2024-12-04T15:52:24,938 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T15:52:24,938 INFO [RS:2;a21b6491b371:41071 {}] regionserver.HRegionServer(746): ClusterId : d8becefd-4441-4a2a-b45c-65d3973a7037 2024-12-04T15:52:24,938 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T15:52:24,938 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T15:52:24,938 DEBUG [RS:0;a21b6491b371:44623 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@284e483b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a21b6491b371/172.17.0.2:0 2024-12-04T15:52:24,939 DEBUG [RS:1;a21b6491b371:38269 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@482bb5da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a21b6491b371/172.17.0.2:0 2024-12-04T15:52:24,940 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T15:52:24,940 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T15:52:24,943 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T15:52:24,945 DEBUG [RS:2;a21b6491b371:41071 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@284e28ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a21b6491b371/172.17.0.2:0 2024-12-04T15:52:24,954 DEBUG [RS:0;a21b6491b371:44623 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a21b6491b371:44623 2024-12-04T15:52:24,954 INFO [RS:0;a21b6491b371:44623 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T15:52:24,954 INFO [RS:0;a21b6491b371:44623 {}] 
regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T15:52:24,955 DEBUG [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-04T15:52:24,956 INFO [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(2659): reportForDuty to master=a21b6491b371,35791,1733327544603 with port=44623, startcode=1733327544652 2024-12-04T15:52:24,956 DEBUG [RS:0;a21b6491b371:44623 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T15:52:24,962 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35731, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T15:52:24,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741832_1008 (size=32) 2024-12-04T15:52:24,963 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35791 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a21b6491b371,44623,1733327544652 2024-12-04T15:52:24,963 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35791 {}] master.ServerManager(517): Registering regionserver=a21b6491b371,44623,1733327544652 2024-12-04T15:52:24,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741832_1008 (size=32) 2024-12-04T15:52:24,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741832_1008 (size=32) 2024-12-04T15:52:24,965 DEBUG [RS:2;a21b6491b371:41071 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;a21b6491b371:41071 2024-12-04T15:52:24,965 INFO [RS:2;a21b6491b371:41071 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T15:52:24,965 INFO [RS:2;a21b6491b371:41071 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T15:52:24,965 DEBUG [RS:2;a21b6491b371:41071 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-04T15:52:24,965 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:52:24,966 INFO [RS:2;a21b6491b371:41071 {}] regionserver.HRegionServer(2659): reportForDuty to master=a21b6491b371,35791,1733327544603 with port=41071, startcode=1733327544712 2024-12-04T15:52:24,966 DEBUG [RS:2;a21b6491b371:41071 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T15:52:24,967 DEBUG [RS:1;a21b6491b371:38269 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;a21b6491b371:38269 2024-12-04T15:52:24,967 INFO [RS:1;a21b6491b371:38269 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T15:52:24,967 INFO [RS:1;a21b6491b371:38269 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T15:52:24,967 DEBUG [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-04T15:52:24,968 INFO [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(2659): reportForDuty to master=a21b6491b371,35791,1733327544603 with port=38269, startcode=1733327544683 2024-12-04T15:52:24,968 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T15:52:24,968 DEBUG [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37 2024-12-04T15:52:24,968 DEBUG [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41963 2024-12-04T15:52:24,968 DEBUG [RS:1;a21b6491b371:38269 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T15:52:24,968 DEBUG [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T15:52:24,969 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40849, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T15:52:24,970 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35791 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a21b6491b371,41071,1733327544712 2024-12-04T15:52:24,970 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35791 {}] master.ServerManager(517): Registering regionserver=a21b6491b371,41071,1733327544712 2024-12-04T15:52:24,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T15:52:24,970 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52089, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T15:52:24,970 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T15:52:24,970 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:24,971 DEBUG [RS:0;a21b6491b371:44623 {}] zookeeper.ZKUtil(111): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a21b6491b371,44623,1733327544652 2024-12-04T15:52:24,971 WARN [RS:0;a21b6491b371:44623 {}] 
hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T15:52:24,971 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:24,971 INFO [RS:0;a21b6491b371:44623 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T15:52:24,971 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T15:52:24,971 DEBUG [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/WALs/a21b6491b371,44623,1733327544652 2024-12-04T15:52:24,972 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35791 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a21b6491b371,38269,1733327544683 2024-12-04T15:52:24,972 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35791 {}] master.ServerManager(517): Registering regionserver=a21b6491b371,38269,1733327544683 2024-12-04T15:52:24,973 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a21b6491b371,44623,1733327544652] 2024-12-04T15:52:24,973 DEBUG [RS:2;a21b6491b371:41071 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37 2024-12-04T15:52:24,973 DEBUG [RS:2;a21b6491b371:41071 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41963 2024-12-04T15:52:24,973 DEBUG [RS:2;a21b6491b371:41071 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T15:52:24,974 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T15:52:24,974 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:24,975 DEBUG [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37 2024-12-04T15:52:24,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:24,975 DEBUG [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41963 2024-12-04T15:52:24,975 DEBUG [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T15:52:24,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T15:52:24,976 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T15:52:24,977 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:24,977 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:24,977 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T15:52:24,979 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T15:52:24,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T15:52:24,979 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:24,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:24,980 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T15:52:24,980 DEBUG [RS:2;a21b6491b371:41071 {}] zookeeper.ZKUtil(111): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a21b6491b371,41071,1733327544712 2024-12-04T15:52:24,980 WARN [RS:2;a21b6491b371:41071 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T15:52:24,980 INFO [RS:2;a21b6491b371:41071 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T15:52:24,980 DEBUG [RS:2;a21b6491b371:41071 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/WALs/a21b6491b371,41071,1733327544712 2024-12-04T15:52:24,981 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a21b6491b371,38269,1733327544683] 2024-12-04T15:52:24,981 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a21b6491b371,41071,1733327544712] 2024-12-04T15:52:24,981 DEBUG [RS:1;a21b6491b371:38269 {}] zookeeper.ZKUtil(111): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a21b6491b371,38269,1733327544683 2024-12-04T15:52:24,981 WARN [RS:1;a21b6491b371:38269 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T15:52:24,981 INFO [RS:1;a21b6491b371:38269 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T15:52:24,981 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740 2024-12-04T15:52:24,981 DEBUG [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/WALs/a21b6491b371,38269,1733327544683 2024-12-04T15:52:24,982 INFO [RS:0;a21b6491b371:44623 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T15:52:24,982 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740 2024-12-04T15:52:24,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T15:52:24,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T15:52:24,985 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
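The FlushLargeStoresPolicy entry above reports that, because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta descriptor, the per-family flush lower bound falls back to the region memstore flush size divided by the number of column families (the "32.0 M" it prints). A minimal, self-contained sketch of that arithmetic, assuming the default 128 MB memstore flush size (not shown in this log) and the four hbase:meta families seen here (info, ns, rep_barrier, table); the class name is only illustrative:

public class FlushLowerBoundFallback {
    public static void main(String[] args) {
        // Assumed default region memstore flush size: 128 MB (value not printed in the log).
        long memstoreFlushSize = 128L * 1024 * 1024;   // 134217728 bytes
        // hbase:meta column families seen in this log: info, ns, rep_barrier, table.
        int numFamilies = 4;
        // Fallback lower bound = flush size / number of families.
        long flushSizeLowerBound = memstoreFlushSize / numFamilies;
        // Matches FlushLargeStoresPolicy{flushSizeLowerBound=33554432}, i.e. 32 MB.
        System.out.println(flushSizeLowerBound);       // 33554432
    }
}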
2024-12-04T15:52:24,986 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T15:52:24,988 INFO [RS:0;a21b6491b371:44623 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T15:52:24,989 INFO [RS:0;a21b6491b371:44623 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T15:52:24,989 INFO [RS:0;a21b6491b371:44623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:24,990 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:52:24,990 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70123268, jitterRate=0.04491811990737915}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T15:52:24,991 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733327544966Initializing all the Stores at 1733327544967 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327544967Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327544968 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733327544968Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327544968Cleaning up temporary data from old regions at 1733327544984 (+16 ms)Region opened successfully at 1733327544991 (+7 ms) 2024-12-04T15:52:24,991 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T15:52:24,991 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T15:52:24,991 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 
2024-12-04T15:52:24,991 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T15:52:24,991 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T15:52:24,992 INFO [RS:2;a21b6491b371:41071 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T15:52:24,992 INFO [RS:1;a21b6491b371:38269 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T15:52:24,996 INFO [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T15:52:24,997 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T15:52:24,997 INFO [RS:2;a21b6491b371:41071 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T15:52:24,997 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733327544991Disabling compacts and flushes for region at 1733327544991Disabling writes for close at 1733327544991Writing region close event to WAL at 1733327544997 (+6 ms)Closed at 1733327544997 2024-12-04T15:52:24,997 INFO [RS:0;a21b6491b371:44623 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T15:52:24,997 INFO [RS:1;a21b6491b371:38269 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T15:52:24,998 INFO [RS:0;a21b6491b371:44623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-04T15:52:24,998 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:24,998 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:24,998 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:24,998 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:24,998 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:24,998 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a21b6491b371:0, corePoolSize=2, maxPoolSize=2 2024-12-04T15:52:24,998 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:24,998 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:24,998 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:24,999 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:24,999 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:24,999 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:24,999 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a21b6491b371:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:52:24,999 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T15:52:24,999 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T15:52:24,999 DEBUG [RS:0;a21b6491b371:44623 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:52:24,999 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T15:52:25,001 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T15:52:25,003 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T15:52:25,004 INFO [RS:1;a21b6491b371:38269 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T15:52:25,004 INFO [RS:1;a21b6491b371:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,005 INFO [RS:2;a21b6491b371:41071 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T15:52:25,005 INFO [RS:2;a21b6491b371:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,005 INFO [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T15:52:25,005 INFO [RS:2;a21b6491b371:41071 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T15:52:25,006 INFO [RS:1;a21b6491b371:38269 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T15:52:25,006 INFO [RS:2;a21b6491b371:41071 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T15:52:25,006 INFO [RS:1;a21b6491b371:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,006 INFO [RS:2;a21b6491b371:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
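The PressureAwareCompactionThroughputController lines above describe a 50.00-100.00 MB/s throttle window re-tuned every 60000 ms. As a rough sketch only, under the assumption (not confirmed by this log) that such a controller interpolates the allowed throughput between its two bounds from a 0..1 compaction-pressure value, the tuning could look like this; the class and method names are hypothetical:

public class ThroughputTuningSketch {
    // Bounds taken from the log: lower 50 MB/s, higher 100 MB/s.
    static final double LOWER_MB_PER_SEC = 50.0;
    static final double HIGHER_MB_PER_SEC = 100.0;

    // Assumed linear interpolation on a pressure value clamped to [0, 1].
    static double tunedThroughput(double pressure) {
        double p = Math.max(0.0, Math.min(1.0, pressure));
        return LOWER_MB_PER_SEC + p * (HIGHER_MB_PER_SEC - LOWER_MB_PER_SEC);
    }

    public static void main(String[] args) {
        System.out.println(tunedThroughput(0.0));  // 50.0  -> low pressure, tightest throttle
        System.out.println(tunedThroughput(0.5));  // 75.0
        System.out.println(tunedThroughput(1.0));  // 100.0 -> high pressure, loosest throttle
    }
}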
2024-12-04T15:52:25,006 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,006 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,006 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,006 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,006 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,006 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,006 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,006 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,006 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,006 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,006 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a21b6491b371:0, corePoolSize=2, maxPoolSize=2 2024-12-04T15:52:25,006 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a21b6491b371:0, corePoolSize=2, maxPoolSize=2 2024-12-04T15:52:25,007 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,007 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,007 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,007 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,007 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,007 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a21b6491b371:0, 
corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,007 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,007 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,007 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,007 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,007 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,007 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a21b6491b371:0, corePoolSize=1, maxPoolSize=1 2024-12-04T15:52:25,007 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a21b6491b371:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:52:25,007 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a21b6491b371:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:52:25,007 DEBUG [RS:2;a21b6491b371:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:52:25,007 DEBUG [RS:1;a21b6491b371:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0, corePoolSize=3, maxPoolSize=3 2024-12-04T15:52:25,009 INFO [RS:0;a21b6491b371:44623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,009 INFO [RS:0;a21b6491b371:44623 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,010 INFO [RS:0;a21b6491b371:44623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,010 INFO [RS:0;a21b6491b371:44623 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,010 INFO [RS:0;a21b6491b371:44623 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,010 INFO [RS:0;a21b6491b371:44623 {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,44623,1733327544652-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T15:52:25,012 INFO [RS:2;a21b6491b371:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,012 INFO [RS:2;a21b6491b371:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-12-04T15:52:25,012 INFO [RS:2;a21b6491b371:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,012 INFO [RS:2;a21b6491b371:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,013 INFO [RS:2;a21b6491b371:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,013 INFO [RS:2;a21b6491b371:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,41071,1733327544712-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T15:52:25,015 INFO [RS:1;a21b6491b371:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,015 INFO [RS:1;a21b6491b371:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,015 INFO [RS:1;a21b6491b371:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,015 INFO [RS:1;a21b6491b371:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,015 INFO [RS:1;a21b6491b371:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,015 INFO [RS:1;a21b6491b371:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,38269,1733327544683-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T15:52:25,026 INFO [RS:0;a21b6491b371:44623 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T15:52:25,026 INFO [RS:0;a21b6491b371:44623 {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,44623,1733327544652-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,027 INFO [RS:0;a21b6491b371:44623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,027 INFO [RS:0;a21b6491b371:44623 {}] regionserver.Replication(171): a21b6491b371,44623,1733327544652 started 2024-12-04T15:52:25,031 INFO [RS:2;a21b6491b371:41071 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T15:52:25,031 INFO [RS:2;a21b6491b371:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,41071,1733327544712-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,031 INFO [RS:2;a21b6491b371:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,031 INFO [RS:2;a21b6491b371:41071 {}] regionserver.Replication(171): a21b6491b371,41071,1733327544712 started 2024-12-04T15:52:25,044 INFO [RS:1;a21b6491b371:38269 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T15:52:25,044 INFO [RS:1;a21b6491b371:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,38269,1733327544683-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
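Each ChoreService(168) line above registers a ScheduledChore with a fixed period (CompactionChecker every 1000 ms, ExecutorStatusChore every 60000 ms, and so on). The same periodic pattern can be sketched with the JDK's ScheduledExecutorService; this is only an analogy in plain java.util.concurrent, not HBase's ChoreService API, and the class name is illustrative:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChorePatternSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
        // Period mirrors "CompactionChecker, period=1000, unit=MILLISECONDS" from the log.
        chores.scheduleAtFixedRate(
                () -> System.out.println("compaction check tick"),
                0, 1000, TimeUnit.MILLISECONDS);
        // Period mirrors "ExecutorStatusChore, period=60000, unit=MILLISECONDS".
        chores.scheduleAtFixedRate(
                () -> System.out.println("executor status tick"),
                0, 60_000, TimeUnit.MILLISECONDS);
        TimeUnit.SECONDS.sleep(3);   // let a few ticks run, then stop
        chores.shutdownNow();
    }
}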
2024-12-04T15:52:25,044 INFO [RS:1;a21b6491b371:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,044 INFO [RS:1;a21b6491b371:38269 {}] regionserver.Replication(171): a21b6491b371,38269,1733327544683 started 2024-12-04T15:52:25,045 INFO [RS:2;a21b6491b371:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,045 INFO [RS:2;a21b6491b371:41071 {}] regionserver.HRegionServer(1482): Serving as a21b6491b371,41071,1733327544712, RpcServer on a21b6491b371/172.17.0.2:41071, sessionid=0x1017e2d4b980003 2024-12-04T15:52:25,045 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T15:52:25,045 DEBUG [RS:2;a21b6491b371:41071 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a21b6491b371,41071,1733327544712 2024-12-04T15:52:25,045 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a21b6491b371,41071,1733327544712' 2024-12-04T15:52:25,046 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T15:52:25,046 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T15:52:25,047 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T15:52:25,047 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T15:52:25,047 DEBUG [RS:2;a21b6491b371:41071 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a21b6491b371,41071,1733327544712 2024-12-04T15:52:25,047 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a21b6491b371,41071,1733327544712' 2024-12-04T15:52:25,047 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T15:52:25,047 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T15:52:25,047 INFO [RS:0;a21b6491b371:44623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T15:52:25,047 INFO [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(1482): Serving as a21b6491b371,44623,1733327544652, RpcServer on a21b6491b371/172.17.0.2:44623, sessionid=0x1017e2d4b980001 2024-12-04T15:52:25,048 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T15:52:25,048 DEBUG [RS:0;a21b6491b371:44623 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a21b6491b371,44623,1733327544652 2024-12-04T15:52:25,048 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a21b6491b371,44623,1733327544652' 2024-12-04T15:52:25,048 DEBUG [RS:2;a21b6491b371:41071 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T15:52:25,048 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T15:52:25,048 INFO [RS:2;a21b6491b371:41071 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T15:52:25,048 INFO [RS:2;a21b6491b371:41071 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T15:52:25,048 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T15:52:25,049 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T15:52:25,049 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T15:52:25,049 DEBUG [RS:0;a21b6491b371:44623 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a21b6491b371,44623,1733327544652 2024-12-04T15:52:25,049 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a21b6491b371,44623,1733327544652' 2024-12-04T15:52:25,049 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T15:52:25,049 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T15:52:25,050 DEBUG [RS:0;a21b6491b371:44623 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T15:52:25,050 INFO [RS:0;a21b6491b371:44623 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T15:52:25,050 INFO [RS:0;a21b6491b371:44623 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T15:52:25,061 INFO [RS:1;a21b6491b371:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T15:52:25,061 INFO [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(1482): Serving as a21b6491b371,38269,1733327544683, RpcServer on a21b6491b371/172.17.0.2:38269, sessionid=0x1017e2d4b980002 2024-12-04T15:52:25,061 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T15:52:25,061 DEBUG [RS:1;a21b6491b371:38269 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a21b6491b371,38269,1733327544683 2024-12-04T15:52:25,061 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a21b6491b371,38269,1733327544683' 2024-12-04T15:52:25,061 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T15:52:25,062 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T15:52:25,062 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T15:52:25,062 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T15:52:25,062 DEBUG [RS:1;a21b6491b371:38269 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a21b6491b371,38269,1733327544683 2024-12-04T15:52:25,062 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a21b6491b371,38269,1733327544683' 2024-12-04T15:52:25,062 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T15:52:25,063 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T15:52:25,063 DEBUG [RS:1;a21b6491b371:38269 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T15:52:25,063 INFO [RS:1;a21b6491b371:38269 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T15:52:25,063 INFO [RS:1;a21b6491b371:38269 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
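The ZKProcedureMemberRpcs lines above show each region server checking the /hbase/flush-table-proc/abort and /hbase/flush-table-proc/acquired znodes on the 127.0.0.1:57411 quorum when its procedure member starts. A minimal sketch of the same kind of inspection with the plain Apache ZooKeeper client follows; it is a standalone illustration of reading those znodes, not that class's actual implementation, and the class name is illustrative:

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureZNodeSketch {
    public static void main(String[] args) throws Exception {
        // Quorum address taken from the log (quorum=127.0.0.1:57411).
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57411", 30_000, event -> { });
        try {
            // Same znodes the region servers poll in the log above.
            List<String> aborted  = zk.getChildren("/hbase/flush-table-proc/abort", false);
            List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", false);
            System.out.println("aborted procedures:  " + aborted);
            System.out.println("acquired procedures: " + acquired);
        } finally {
            zk.close();
        }
    }
}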
2024-12-04T15:52:25,151 INFO [RS:2;a21b6491b371:41071 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a21b6491b371%2C41071%2C1733327544712, suffix=, logDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/WALs/a21b6491b371,41071,1733327544712, archiveDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/oldWALs, maxLogs=32 2024-12-04T15:52:25,152 INFO [RS:0;a21b6491b371:44623 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a21b6491b371%2C44623%2C1733327544652, suffix=, logDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/WALs/a21b6491b371,44623,1733327544652, archiveDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/oldWALs, maxLogs=32 2024-12-04T15:52:25,153 INFO [RS:2;a21b6491b371:41071 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a21b6491b371%2C41071%2C1733327544712.1733327545152 2024-12-04T15:52:25,153 WARN [a21b6491b371:35791 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-04T15:52:25,154 INFO [RS:0;a21b6491b371:44623 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a21b6491b371%2C44623%2C1733327544652.1733327545154 2024-12-04T15:52:25,161 INFO [RS:2;a21b6491b371:41071 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/WALs/a21b6491b371,41071,1733327544712/a21b6491b371%2C41071%2C1733327544712.1733327545152 2024-12-04T15:52:25,165 DEBUG [RS:2;a21b6491b371:41071 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41509:41509),(127.0.0.1/127.0.0.1:42293:42293),(127.0.0.1/127.0.0.1:38521:38521)] 2024-12-04T15:52:25,165 INFO [RS:0;a21b6491b371:44623 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/WALs/a21b6491b371,44623,1733327544652/a21b6491b371%2C44623%2C1733327544652.1733327545154 2024-12-04T15:52:25,166 INFO [RS:1;a21b6491b371:38269 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a21b6491b371%2C38269%2C1733327544683, suffix=, logDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/WALs/a21b6491b371,38269,1733327544683, archiveDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/oldWALs, maxLogs=32 2024-12-04T15:52:25,166 INFO [RS:1;a21b6491b371:38269 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a21b6491b371%2C38269%2C1733327544683.1733327545166 2024-12-04T15:52:25,173 DEBUG [RS:0;a21b6491b371:44623 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42293:42293),(127.0.0.1/127.0.0.1:38521:38521),(127.0.0.1/127.0.0.1:41509:41509)] 2024-12-04T15:52:25,180 INFO [RS:1;a21b6491b371:38269 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/WALs/a21b6491b371,38269,1733327544683/a21b6491b371%2C38269%2C1733327544683.1733327545166 2024-12-04T15:52:25,182 DEBUG [RS:1;a21b6491b371:38269 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38521:38521),(127.0.0.1/127.0.0.1:42293:42293),(127.0.0.1/127.0.0.1:41509:41509)] 2024-12-04T15:52:25,403 DEBUG [a21b6491b371:35791 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-04T15:52:25,404 DEBUG [a21b6491b371:35791 {}] 
balancer.BalancerClusterState(204): Hosts are {a21b6491b371=0} racks are {/default-rack=0} 2024-12-04T15:52:25,406 DEBUG [a21b6491b371:35791 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-04T15:52:25,406 DEBUG [a21b6491b371:35791 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-04T15:52:25,406 DEBUG [a21b6491b371:35791 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-04T15:52:25,406 DEBUG [a21b6491b371:35791 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-04T15:52:25,406 DEBUG [a21b6491b371:35791 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-04T15:52:25,406 DEBUG [a21b6491b371:35791 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-04T15:52:25,406 INFO [a21b6491b371:35791 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-04T15:52:25,406 INFO [a21b6491b371:35791 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-04T15:52:25,406 INFO [a21b6491b371:35791 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-04T15:52:25,406 DEBUG [a21b6491b371:35791 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-04T15:52:25,407 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a21b6491b371,38269,1733327544683 2024-12-04T15:52:25,409 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a21b6491b371,38269,1733327544683, state=OPENING 2024-12-04T15:52:25,410 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T15:52:25,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:25,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:25,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:25,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:25,414 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:25,414 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:25,414 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T15:52:25,414 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 
2024-12-04T15:52:25,414 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:25,414 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a21b6491b371,38269,1733327544683}] 2024-12-04T15:52:25,568 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T15:52:25,570 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40973, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T15:52:25,575 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T15:52:25,575 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T15:52:25,578 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a21b6491b371%2C38269%2C1733327544683.meta, suffix=.meta, logDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/WALs/a21b6491b371,38269,1733327544683, archiveDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/oldWALs, maxLogs=32 2024-12-04T15:52:25,578 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a21b6491b371%2C38269%2C1733327544683.meta.1733327545578.meta 2024-12-04T15:52:25,590 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/WALs/a21b6491b371,38269,1733327544683/a21b6491b371%2C38269%2C1733327544683.meta.1733327545578.meta 2024-12-04T15:52:25,596 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42293:42293),(127.0.0.1/127.0.0.1:41509:41509),(127.0.0.1/127.0.0.1:38521:38521)] 2024-12-04T15:52:25,598 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:52:25,598 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T15:52:25,598 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T15:52:25,598 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
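The WAL names created above encode the owning server name with URL-encoded commas (%2C) followed by a creation timestamp in epoch milliseconds, e.g. a21b6491b371%2C38269%2C1733327544683.meta.1733327545578.meta. A small JDK-only sketch of pulling those pieces back out of that file name (the class name is illustrative, and the host,port,startcode reading of the server-name field is an interpretation of the decoded string, not something the log states):

import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.time.Instant;

public class WalNameSketch {
    public static void main(String[] args) {
        // Meta WAL name taken from the log line above.
        String name = "a21b6491b371%2C38269%2C1733327544683.meta.1733327545578.meta";
        String decoded = URLDecoder.decode(name, StandardCharsets.UTF_8);
        // -> a21b6491b371,38269,1733327544683.meta.1733327545578.meta
        System.out.println(decoded);
        String[] parts = decoded.split("\\.");
        // parts[0] = "a21b6491b371,38269,1733327544683"  (host,port,startcode)
        // parts[2] = "1733327545578"                      (WAL creation time, epoch millis)
        System.out.println("server  = " + parts[0]);
        System.out.println("created = " + Instant.ofEpochMilli(Long.parseLong(parts[2])));
    }
}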
2024-12-04T15:52:25,599 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T15:52:25,599 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:52:25,599 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T15:52:25,599 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T15:52:25,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T15:52:25,602 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T15:52:25,603 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:25,603 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:25,603 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T15:52:25,604 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T15:52:25,605 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:25,605 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:25,605 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T15:52:25,606 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T15:52:25,606 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:25,607 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T15:52:25,607 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T15:52:25,608 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T15:52:25,608 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:25,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-04T15:52:25,609 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T15:52:25,610 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740 2024-12-04T15:52:25,611 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740 2024-12-04T15:52:25,613 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T15:52:25,613 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T15:52:25,613 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T15:52:25,615 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T15:52:25,616 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72198545, jitterRate=0.07584215700626373}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T15:52:25,616 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T15:52:25,617 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733327545599Writing region info on filesystem at 1733327545599Initializing all the Stores at 1733327545600 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327545601 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327545601Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733327545601Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733327545601Cleaning up temporary data from old regions at 1733327545613 (+12 ms)Running coprocessor post-open hooks at 1733327545616 (+3 ms)Region opened successfully at 1733327545617 (+1 ms) 2024-12-04T15:52:25,619 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733327545568 2024-12-04T15:52:25,622 DEBUG [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T15:52:25,623 INFO [RS_OPEN_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T15:52:25,624 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a21b6491b371,38269,1733327544683 2024-12-04T15:52:25,625 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a21b6491b371,38269,1733327544683, state=OPEN 2024-12-04T15:52:25,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T15:52:25,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T15:52:25,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T15:52:25,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T15:52:25,627 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:25,627 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a21b6491b371,38269,1733327544683 2024-12-04T15:52:25,627 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:25,627 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:25,627 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T15:52:25,632 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T15:52:25,632 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a21b6491b371,38269,1733327544683 in 213 msec 2024-12-04T15:52:25,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T15:52:25,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 633 msec 2024-12-04T15:52:25,637 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T15:52:25,637 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T15:52:25,639 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T15:52:25,639 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a21b6491b371,38269,1733327544683, seqNum=-1] 2024-12-04T15:52:25,640 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:52:25,641 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53311, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:52:25,649 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 743 msec 2024-12-04T15:52:25,650 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733327545649, completionTime=-1 2024-12-04T15:52:25,650 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-04T15:52:25,650 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-04T15:52:25,652 INFO [master/a21b6491b371:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-04T15:52:25,652 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733327605652 2024-12-04T15:52:25,652 INFO [master/a21b6491b371:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733327665652 2024-12-04T15:52:25,652 INFO [master/a21b6491b371:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-04T15:52:25,653 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,35791,1733327544603-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,653 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,35791,1733327544603-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,653 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,35791,1733327544603-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,653 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a21b6491b371:35791, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,653 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,653 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,655 DEBUG [master/a21b6491b371:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T15:52:25,658 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.912sec 2024-12-04T15:52:25,658 INFO [master/a21b6491b371:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T15:52:25,658 INFO [master/a21b6491b371:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T15:52:25,658 INFO [master/a21b6491b371:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T15:52:25,658 INFO [master/a21b6491b371:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T15:52:25,658 INFO [master/a21b6491b371:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T15:52:25,658 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,35791,1733327544603-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-04T15:52:25,658 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,35791,1733327544603-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T15:52:25,661 DEBUG [master/a21b6491b371:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T15:52:25,661 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T15:52:25,661 INFO [master/a21b6491b371:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a21b6491b371,35791,1733327544603-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T15:52:25,738 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@534ba2d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:52:25,738 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a21b6491b371,35791,-1 for getting cluster id 2024-12-04T15:52:25,738 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T15:52:25,740 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd8becefd-4441-4a2a-b45c-65d3973a7037' 2024-12-04T15:52:25,740 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T15:52:25,740 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d8becefd-4441-4a2a-b45c-65d3973a7037" 2024-12-04T15:52:25,741 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eb3073a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:52:25,741 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a21b6491b371,35791,-1] 2024-12-04T15:52:25,741 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T15:52:25,741 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:25,743 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42482, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T15:52:25,744 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58397a7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T15:52:25,744 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T15:52:25,745 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=a21b6491b371,38269,1733327544683, seqNum=-1] 2024-12-04T15:52:25,746 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:52:25,747 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53008, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:52:25,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a21b6491b371,35791,1733327544603 2024-12-04T15:52:25,750 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-04T15:52:25,751 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is a21b6491b371,35791,1733327544603 2024-12-04T15:52:25,751 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@75c0db74 2024-12-04T15:52:25,752 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T15:52:25,753 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42488, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T15:52:25,754 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T15:52:25,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-04T15:52:25,758 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T15:52:25,758 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:25,758 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-04T15:52:25,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T15:52:25,760 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T15:52:25,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741837_1013 (size=392) 
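At this point the log shows the mini-cluster usable ("Minicluster is up; activeMaster=a21b6491b371,35791,1733327544603") and the first client request, a create of 'TestHBaseWalOnEC' with a single 'cf' family, reaching the master. The test code itself is not part of this log; what follows is a minimal sketch of the harness setup that would produce this startup, assuming HBaseTestingUtil (the class named in the log) keeps the familiar no-arg constructor and startMiniCluster/getAdmin helpers of the older HBaseTestingUtility:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.client.Admin;

    // Illustrative setup only: three region servers, matching
    // "expected min=3 server(s), max=3" reported by ServerManager above.
    static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

    static Admin startCluster() throws Exception {
      UTIL.startMiniCluster(3);   // mini-DFS + ZooKeeper + master + 3 region servers
      return UTIL.getAdmin();     // Admin stub against the active master
    }

The Admin and Connection handles used in the later sketches are assumed to come from this harness (UTIL.getAdmin() / UTIL.getConnection()).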
2024-12-04T15:52:25,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741837_1013 (size=392) 2024-12-04T15:52:25,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741837_1013 (size=392) 2024-12-04T15:52:25,772 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5117c7d66ce88a349ce6d6a8bdbf45a2, NAME => 'TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37 2024-12-04T15:52:25,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741838_1014 (size=51) 2024-12-04T15:52:25,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741838_1014 (size=51) 2024-12-04T15:52:25,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741838_1014 (size=51) 2024-12-04T15:52:25,783 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:52:25,783 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 5117c7d66ce88a349ce6d6a8bdbf45a2, disabling compactions & flushes 2024-12-04T15:52:25,783 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 2024-12-04T15:52:25,783 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 2024-12-04T15:52:25,783 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. after waiting 0 ms 2024-12-04T15:52:25,783 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 2024-12-04T15:52:25,783 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 
2024-12-04T15:52:25,783 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5117c7d66ce88a349ce6d6a8bdbf45a2: Waiting for close lock at 1733327545783Disabling compacts and flushes for region at 1733327545783Disabling writes for close at 1733327545783Writing region close event to WAL at 1733327545783Closed at 1733327545783 2024-12-04T15:52:25,786 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T15:52:25,786 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733327545786"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733327545786"}]},"ts":"1733327545786"} 2024-12-04T15:52:25,790 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-04T15:52:25,791 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T15:52:25,792 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733327545791"}]},"ts":"1733327545791"} 2024-12-04T15:52:25,794 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-04T15:52:25,795 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {a21b6491b371=0} racks are {/default-rack=0} 2024-12-04T15:52:25,795 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-04T15:52:25,795 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-04T15:52:25,795 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-04T15:52:25,796 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-04T15:52:25,796 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-04T15:52:25,796 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-04T15:52:25,796 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-04T15:52:25,796 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-04T15:52:25,796 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-04T15:52:25,796 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-04T15:52:25,796 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5117c7d66ce88a349ce6d6a8bdbf45a2, ASSIGN}] 2024-12-04T15:52:25,798 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5117c7d66ce88a349ce6d6a8bdbf45a2, ASSIGN 2024-12-04T15:52:25,799 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5117c7d66ce88a349ce6d6a8bdbf45a2, ASSIGN; state=OFFLINE, location=a21b6491b371,44623,1733327544652; forceNewPlan=false, retain=false 2024-12-04T15:52:25,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T15:52:25,950 INFO [a21b6491b371:35791 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-04T15:52:25,950 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5117c7d66ce88a349ce6d6a8bdbf45a2, regionState=OPENING, regionLocation=a21b6491b371,44623,1733327544652 2024-12-04T15:52:25,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5117c7d66ce88a349ce6d6a8bdbf45a2, ASSIGN because future has completed 2024-12-04T15:52:25,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5117c7d66ce88a349ce6d6a8bdbf45a2, server=a21b6491b371,44623,1733327544652}] 2024-12-04T15:52:26,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T15:52:26,109 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T15:52:26,111 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49709, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T15:52:26,117 INFO [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 
2024-12-04T15:52:26,117 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5117c7d66ce88a349ce6d6a8bdbf45a2, NAME => 'TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2.', STARTKEY => '', ENDKEY => ''} 2024-12-04T15:52:26,117 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 5117c7d66ce88a349ce6d6a8bdbf45a2 2024-12-04T15:52:26,117 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T15:52:26,118 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5117c7d66ce88a349ce6d6a8bdbf45a2 2024-12-04T15:52:26,118 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5117c7d66ce88a349ce6d6a8bdbf45a2 2024-12-04T15:52:26,119 INFO [StoreOpener-5117c7d66ce88a349ce6d6a8bdbf45a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5117c7d66ce88a349ce6d6a8bdbf45a2 2024-12-04T15:52:26,121 INFO [StoreOpener-5117c7d66ce88a349ce6d6a8bdbf45a2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5117c7d66ce88a349ce6d6a8bdbf45a2 columnFamilyName cf 2024-12-04T15:52:26,121 DEBUG [StoreOpener-5117c7d66ce88a349ce6d6a8bdbf45a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T15:52:26,122 INFO [StoreOpener-5117c7d66ce88a349ce6d6a8bdbf45a2-1 {}] regionserver.HStore(327): Store=5117c7d66ce88a349ce6d6a8bdbf45a2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T15:52:26,122 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5117c7d66ce88a349ce6d6a8bdbf45a2 2024-12-04T15:52:26,123 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/default/TestHBaseWalOnEC/5117c7d66ce88a349ce6d6a8bdbf45a2 2024-12-04T15:52:26,123 DEBUG 
[RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/default/TestHBaseWalOnEC/5117c7d66ce88a349ce6d6a8bdbf45a2 2024-12-04T15:52:26,124 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5117c7d66ce88a349ce6d6a8bdbf45a2 2024-12-04T15:52:26,124 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5117c7d66ce88a349ce6d6a8bdbf45a2 2024-12-04T15:52:26,126 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5117c7d66ce88a349ce6d6a8bdbf45a2 2024-12-04T15:52:26,128 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/default/TestHBaseWalOnEC/5117c7d66ce88a349ce6d6a8bdbf45a2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T15:52:26,128 INFO [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5117c7d66ce88a349ce6d6a8bdbf45a2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72936204, jitterRate=0.0868341326713562}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T15:52:26,128 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5117c7d66ce88a349ce6d6a8bdbf45a2 2024-12-04T15:52:26,129 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5117c7d66ce88a349ce6d6a8bdbf45a2: Running coprocessor pre-open hook at 1733327546118Writing region info on filesystem at 1733327546118Initializing all the Stores at 1733327546119 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733327546119Cleaning up temporary data from old regions at 1733327546124 (+5 ms)Running coprocessor post-open hooks at 1733327546129 (+5 ms)Region opened successfully at 1733327546129 2024-12-04T15:52:26,131 INFO [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2., pid=6, masterSystemTime=1733327546109 2024-12-04T15:52:26,135 DEBUG [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 2024-12-04T15:52:26,135 INFO [RS_OPEN_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 
2024-12-04T15:52:26,136 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5117c7d66ce88a349ce6d6a8bdbf45a2, regionState=OPEN, openSeqNum=2, regionLocation=a21b6491b371,44623,1733327544652 2024-12-04T15:52:26,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5117c7d66ce88a349ce6d6a8bdbf45a2, server=a21b6491b371,44623,1733327544652 because future has completed 2024-12-04T15:52:26,145 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-04T15:52:26,145 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5117c7d66ce88a349ce6d6a8bdbf45a2, server=a21b6491b371,44623,1733327544652 in 186 msec 2024-12-04T15:52:26,149 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-04T15:52:26,149 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5117c7d66ce88a349ce6d6a8bdbf45a2, ASSIGN in 349 msec 2024-12-04T15:52:26,150 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T15:52:26,150 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733327546150"}]},"ts":"1733327546150"} 2024-12-04T15:52:26,153 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-04T15:52:26,155 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T15:52:26,157 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 401 msec 2024-12-04T15:52:26,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T15:52:26,388 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-04T15:52:26,388 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-04T15:52:26,388 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-04T15:52:26,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-04T15:52:26,392 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-04T15:52:26,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
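The CREATE operation is now complete (pid=4 finished in 401 msec, the single region assigned and verified). A hedged sketch of the client call that drives this CreateTableProcedure, using the standard TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API; the table name, the REGION_REPLICATION => '1' attribute and the lone 'cf' family are taken from the descriptor the master logged, while the method and variable names are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Builds the descriptor logged as 'TestHBaseWalOnEC',
    // {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', ...defaults...}
    static void createTestTable(Admin admin) throws IOException {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      admin.createTable(desc);  // returns after pid=4 reaches SUCCESS and the region is OPEN
    }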
2024-12-04T15:52:26,396 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2., hostname=a21b6491b371,44623,1733327544652, seqNum=2] 2024-12-04T15:52:26,396 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T15:52:26,398 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47034, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T15:52:26,401 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-04T15:52:26,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-04T15:52:26,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-04T15:52:26,405 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-04T15:52:26,406 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T15:52:26,407 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T15:52:26,511 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T15:52:26,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-04T15:52:26,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T15:52:26,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44623 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-04T15:52:26,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 
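Before the flush runs, the client has located the single region for row 'row' ("The fetched location ... hostname=a21b6491b371,44623,1733327544652, seqNum=2"). The async locator does this internally when the write is issued; for illustration only, the equivalent synchronous lookup with the public RegionLocator API (the Connection is assumed to come from the harness, e.g. UTIL.getConnection()):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    // Equivalent of the locate step logged by AsyncNonMetaRegionLocator:
    // resolves which region server hosts the row before the write is sent.
    static HRegionLocation locateRow(Connection connection) throws IOException {
      try (RegionLocator locator =
               connection.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
        return locator.getRegionLocation(Bytes.toBytes("row"));  // -> a21b6491b371,44623,..., seqNum=2
      }
    }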
2024-12-04T15:52:26,562 INFO [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 5117c7d66ce88a349ce6d6a8bdbf45a2 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-04T15:52:26,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/default/TestHBaseWalOnEC/5117c7d66ce88a349ce6d6a8bdbf45a2/.tmp/cf/3db5d50d283749179b0d230d730eb396 is 36, key is row/cf:cq/1733327546399/Put/seqid=0 2024-12-04T15:52:26,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T15:52:26,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T15:52:26,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741839_1015 (size=4787) 2024-12-04T15:52:26,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741839_1015 (size=4787) 2024-12-04T15:52:26,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741839_1015 (size=4787) 2024-12-04T15:52:26,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-04T15:52:26,991 INFO [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/default/TestHBaseWalOnEC/5117c7d66ce88a349ce6d6a8bdbf45a2/.tmp/cf/3db5d50d283749179b0d230d730eb396 2024-12-04T15:52:27,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/default/TestHBaseWalOnEC/5117c7d66ce88a349ce6d6a8bdbf45a2/.tmp/cf/3db5d50d283749179b0d230d730eb396 as hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/default/TestHBaseWalOnEC/5117c7d66ce88a349ce6d6a8bdbf45a2/cf/3db5d50d283749179b0d230d730eb396 2024-12-04T15:52:27,008 INFO [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/default/TestHBaseWalOnEC/5117c7d66ce88a349ce6d6a8bdbf45a2/cf/3db5d50d283749179b0d230d730eb396, entries=1, sequenceid=5, filesize=4.7 K 2024-12-04T15:52:27,010 INFO [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 5117c7d66ce88a349ce6d6a8bdbf45a2 in 448ms, sequenceid=5, compaction requested=false 2024-12-04T15:52:27,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): 
Flush status journal for 5117c7d66ce88a349ce6d6a8bdbf45a2: 2024-12-04T15:52:27,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 2024-12-04T15:52:27,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a21b6491b371:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-04T15:52:27,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-04T15:52:27,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-04T15:52:27,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 607 msec 2024-12-04T15:52:27,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 617 msec 2024-12-04T15:52:27,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35791 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-04T15:52:27,037 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-04T15:52:27,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T15:52:27,042 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
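The flush has now finished end to end: a single ~32 B cell was written, FlushTableProcedure pid=7 fanned out FlushRegionProcedure pid=8, the memstore was persisted to cf/3db5d50d283749179b0d230d730eb396 at sequenceid=5, and the client saw "Operation: FLUSH ... completed". A sketch of the write-then-flush sequence that produces this, with row/cf:cq taken from the HFile key logged above; the cell value itself is not recoverable from the log, so the bytes below are only an assumption:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Single Put followed by an admin-triggered flush: the Put lands in the WAL and
    // memstore of 5117c7d66ce88a349ce6d6a8bdbf45a2; the flush writes it out as an HFile.
    static void putAndFlush(Connection connection, Admin admin) throws IOException {
      TableName tn = TableName.valueOf("TestHBaseWalOnEC");
      try (Table table = connection.getTable(tn)) {
        Put put = new Put(Bytes.toBytes("row"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")); // value assumed
        table.put(put);
      }
      admin.flush(tn);  // drives FlushTableProcedure; the log shows the client polling until pid=7 is done
    }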
2024-12-04T15:52:27,042 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:52:27,042 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
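The call stack above comes from the test's own teardown: TestHBaseWalOnEC.tearDown (line 101) calls HBaseTestingUtil.shutdownMiniCluster, which first closes the shared connection and then asks the master to shut the cluster down. Reconstructed as a JUnit hook consistent with the RunAfters frames in the trace; the UTIL field name and the @After annotation are assumptions, not something the log states:

    import org.junit.After;

    // Teardown matching the frames above: shutdownMiniCluster() closes the shared
    // client connection and stops the master and all three region servers.
    @After
    public void tearDown() throws Exception {
      UTIL.shutdownMiniCluster();  // UTIL: the HBaseTestingUtil instance from setup (assumed name)
    }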
2024-12-04T15:52:27,042 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:27,042 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-04T15:52:27,042 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T15:52:27,043 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1721991146, stopped=false 2024-12-04T15:52:27,043 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a21b6491b371,35791,1733327544603 2024-12-04T15:52:27,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:27,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:27,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:27,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T15:52:27,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:27,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:27,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:27,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:27,044 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T15:52:27,045 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T15:52:27,045 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:52:27,045 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:27,045 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:27,045 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:27,045 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:27,045 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a21b6491b371,44623,1733327544652' ***** 2024-12-04T15:52:27,046 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T15:52:27,046 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a21b6491b371,38269,1733327544683' ***** 2024-12-04T15:52:27,046 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T15:52:27,046 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a21b6491b371,41071,1733327544712' ***** 2024-12-04T15:52:27,046 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T15:52:27,046 INFO [RS:0;a21b6491b371:44623 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T15:52:27,046 INFO [RS:1;a21b6491b371:38269 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T15:52:27,046 INFO [RS:0;a21b6491b371:44623 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T15:52:27,046 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T15:52:27,046 INFO [RS:0;a21b6491b371:44623 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T15:52:27,046 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T15:52:27,046 INFO [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(3091): Received CLOSE for 5117c7d66ce88a349ce6d6a8bdbf45a2 2024-12-04T15:52:27,046 INFO [RS:2;a21b6491b371:41071 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T15:52:27,046 INFO [RS:1;a21b6491b371:38269 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T15:52:27,047 INFO [RS:2;a21b6491b371:41071 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-04T15:52:27,047 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T15:52:27,047 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T15:52:27,047 INFO [RS:1;a21b6491b371:38269 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T15:52:27,047 INFO [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(959): stopping server a21b6491b371,44623,1733327544652 2024-12-04T15:52:27,047 INFO [RS:2;a21b6491b371:41071 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T15:52:27,047 INFO [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(959): stopping server a21b6491b371,38269,1733327544683 2024-12-04T15:52:27,047 INFO [RS:0;a21b6491b371:44623 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T15:52:27,047 INFO [RS:2;a21b6491b371:41071 {}] regionserver.HRegionServer(959): stopping server a21b6491b371,41071,1733327544712 2024-12-04T15:52:27,047 INFO [RS:1;a21b6491b371:38269 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T15:52:27,047 INFO [RS:2;a21b6491b371:41071 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T15:52:27,047 INFO [RS:0;a21b6491b371:44623 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a21b6491b371:44623. 2024-12-04T15:52:27,047 INFO [RS:1;a21b6491b371:38269 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;a21b6491b371:38269. 2024-12-04T15:52:27,047 INFO [RS:2;a21b6491b371:41071 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;a21b6491b371:41071. 
2024-12-04T15:52:27,047 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5117c7d66ce88a349ce6d6a8bdbf45a2, disabling compactions & flushes 2024-12-04T15:52:27,047 DEBUG [RS:0;a21b6491b371:44623 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:52:27,047 INFO [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 
2024-12-04T15:52:27,047 DEBUG [RS:1;a21b6491b371:38269 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:52:27,047 DEBUG [RS:0;a21b6491b371:44623 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:27,047 DEBUG [RS:2;a21b6491b371:41071 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T15:52:27,047 DEBUG [RS:1;a21b6491b371:38269 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:27,047 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 
2024-12-04T15:52:27,047 DEBUG [RS:2;a21b6491b371:41071 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:27,047 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. after waiting 0 ms 2024-12-04T15:52:27,048 INFO [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-04T15:52:27,048 INFO [RS:1;a21b6491b371:38269 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T15:52:27,048 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 2024-12-04T15:52:27,048 DEBUG [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(1325): Online Regions={5117c7d66ce88a349ce6d6a8bdbf45a2=TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2.} 2024-12-04T15:52:27,048 INFO [RS:2;a21b6491b371:41071 {}] regionserver.HRegionServer(976): stopping server a21b6491b371,41071,1733327544712; all regions closed. 2024-12-04T15:52:27,048 INFO [RS:1;a21b6491b371:38269 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T15:52:27,048 INFO [RS:1;a21b6491b371:38269 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-04T15:52:27,048 DEBUG [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(1351): Waiting on 5117c7d66ce88a349ce6d6a8bdbf45a2 2024-12-04T15:52:27,048 INFO [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T15:52:27,048 INFO [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-04T15:52:27,048 DEBUG [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-04T15:52:27,048 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,048 DEBUG [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-04T15:52:27,049 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,049 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T15:52:27,049 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T15:52:27,049 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,049 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T15:52:27,049 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,049 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T15:52:27,049 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,049 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T15:52:27,049 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-04T15:52:27,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741833_1009 (size=93) 2024-12-04T15:52:27,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741833_1009 (size=93) 2024-12-04T15:52:27,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741833_1009 (size=93) 2024-12-04T15:52:27,055 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/default/TestHBaseWalOnEC/5117c7d66ce88a349ce6d6a8bdbf45a2/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-04T15:52:27,056 INFO [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 2024-12-04T15:52:27,056 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5117c7d66ce88a349ce6d6a8bdbf45a2: Waiting for close lock at 1733327547047Running coprocessor pre-close hooks at 1733327547047Disabling compacts and flushes for region at 1733327547047Disabling writes for close at 1733327547048 (+1 ms)Writing region close event to WAL at 1733327547049 (+1 ms)Running coprocessor post-close hooks at 1733327547056 (+7 ms)Closed at 1733327547056 2024-12-04T15:52:27,057 DEBUG [RS_CLOSE_REGION-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2. 2024-12-04T15:52:27,060 DEBUG [RS:2;a21b6491b371:41071 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/oldWALs 2024-12-04T15:52:27,060 INFO [RS:2;a21b6491b371:41071 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a21b6491b371%2C41071%2C1733327544712:(num 1733327545152) 2024-12-04T15:52:27,060 DEBUG [RS:2;a21b6491b371:41071 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:27,060 INFO [RS:2;a21b6491b371:41071 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T15:52:27,060 INFO [RS:2;a21b6491b371:41071 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T15:52:27,060 INFO [RS:2;a21b6491b371:41071 {}] hbase.ChoreService(370): Chore service for: regionserver/a21b6491b371:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T15:52:27,060 INFO [RS:2;a21b6491b371:41071 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T15:52:27,060 INFO [regionserver/a21b6491b371:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T15:52:27,060 INFO [RS:2;a21b6491b371:41071 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T15:52:27,060 INFO [RS:2;a21b6491b371:41071 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-04T15:52:27,060 INFO [RS:2;a21b6491b371:41071 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T15:52:27,061 INFO [RS:2;a21b6491b371:41071 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41071 2024-12-04T15:52:27,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T15:52:27,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a21b6491b371,41071,1733327544712 2024-12-04T15:52:27,062 INFO [RS:2;a21b6491b371:41071 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T15:52:27,064 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a21b6491b371,41071,1733327544712] 2024-12-04T15:52:27,067 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a21b6491b371,41071,1733327544712 already deleted, retry=false 2024-12-04T15:52:27,067 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a21b6491b371,41071,1733327544712 expired; onlineServers=2 2024-12-04T15:52:27,070 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/.tmp/info/e5004c95414f4139b5e826a73f4e4f0d is 153, key is TestHBaseWalOnEC,,1733327545754.5117c7d66ce88a349ce6d6a8bdbf45a2./info:regioninfo/1733327546136/Put/seqid=0 2024-12-04T15:52:27,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741840_1016 (size=6637) 2024-12-04T15:52:27,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741840_1016 (size=6637) 2024-12-04T15:52:27,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741840_1016 (size=6637) 2024-12-04T15:52:27,079 INFO [regionserver/a21b6491b371:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-04T15:52:27,079 INFO [regionserver/a21b6491b371:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-04T15:52:27,081 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/.tmp/info/e5004c95414f4139b5e826a73f4e4f0d 2024-12-04T15:52:27,107 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/.tmp/ns/e4882a54c0914ce7ade1067b9668809a is 43, key is default/ns:d/1733327545642/Put/seqid=0 2024-12-04T15:52:27,112 INFO [regionserver/a21b6491b371:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T15:52:27,113 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741841_1017 (size=5153) 2024-12-04T15:52:27,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741841_1017 (size=5153) 2024-12-04T15:52:27,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741841_1017 (size=5153) 2024-12-04T15:52:27,117 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/.tmp/ns/e4882a54c0914ce7ade1067b9668809a 2024-12-04T15:52:27,118 INFO [regionserver/a21b6491b371:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T15:52:27,125 INFO [regionserver/a21b6491b371:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T15:52:27,142 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/.tmp/table/6384fcc610da45e4b1998dbd76807839 is 52, key is TestHBaseWalOnEC/table:state/1733327546150/Put/seqid=0 2024-12-04T15:52:27,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741842_1018 (size=5249) 2024-12-04T15:52:27,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741842_1018 (size=5249) 2024-12-04T15:52:27,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741842_1018 (size=5249) 2024-12-04T15:52:27,150 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/.tmp/table/6384fcc610da45e4b1998dbd76807839 2024-12-04T15:52:27,158 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/.tmp/info/e5004c95414f4139b5e826a73f4e4f0d as hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/info/e5004c95414f4139b5e826a73f4e4f0d 2024-12-04T15:52:27,164 INFO [RS:2;a21b6491b371:41071 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T15:52:27,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:27,164 INFO [RS:2;a21b6491b371:41071 {}] regionserver.HRegionServer(1031): Exiting; stopping=a21b6491b371,41071,1733327544712; zookeeper connection closed. 
2024-12-04T15:52:27,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x1017e2d4b980003, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:27,164 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4b1d251 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4b1d251 2024-12-04T15:52:27,165 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/info/e5004c95414f4139b5e826a73f4e4f0d, entries=10, sequenceid=11, filesize=6.5 K 2024-12-04T15:52:27,166 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/.tmp/ns/e4882a54c0914ce7ade1067b9668809a as hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/ns/e4882a54c0914ce7ade1067b9668809a 2024-12-04T15:52:27,173 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/ns/e4882a54c0914ce7ade1067b9668809a, entries=2, sequenceid=11, filesize=5.0 K 2024-12-04T15:52:27,174 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/.tmp/table/6384fcc610da45e4b1998dbd76807839 as hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/table/6384fcc610da45e4b1998dbd76807839 2024-12-04T15:52:27,181 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/table/6384fcc610da45e4b1998dbd76807839, entries=2, sequenceid=11, filesize=5.1 K 2024-12-04T15:52:27,182 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 133ms, sequenceid=11, compaction requested=false 2024-12-04T15:52:27,187 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-04T15:52:27,188 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T15:52:27,188 INFO [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T15:52:27,188 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close 
lock at 1733327547048Running coprocessor pre-close hooks at 1733327547048Disabling compacts and flushes for region at 1733327547048Disabling writes for close at 1733327547049 (+1 ms)Obtaining lock to block concurrent updates at 1733327547049Preparing flush snapshotting stores in 1588230740 at 1733327547049Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733327547050 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733327547051 (+1 ms)Flushing 1588230740/info: creating writer at 1733327547051Flushing 1588230740/info: appending metadata at 1733327547070 (+19 ms)Flushing 1588230740/info: closing flushed file at 1733327547070Flushing 1588230740/ns: creating writer at 1733327547089 (+19 ms)Flushing 1588230740/ns: appending metadata at 1733327547106 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733327547106Flushing 1588230740/table: creating writer at 1733327547125 (+19 ms)Flushing 1588230740/table: appending metadata at 1733327547141 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733327547141Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a1987d0: reopening flushed file at 1733327547157 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@53f5a22f: reopening flushed file at 1733327547165 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20a0aed: reopening flushed file at 1733327547173 (+8 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 133ms, sequenceid=11, compaction requested=false at 1733327547182 (+9 ms)Writing region close event to WAL at 1733327547184 (+2 ms)Running coprocessor post-close hooks at 1733327547188 (+4 ms)Closed at 1733327547188 2024-12-04T15:52:27,188 DEBUG [RS_CLOSE_META-regionserver/a21b6491b371:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T15:52:27,248 INFO [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(976): stopping server a21b6491b371,44623,1733327544652; all regions closed. 2024-12-04T15:52:27,248 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,249 INFO [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(976): stopping server a21b6491b371,38269,1733327544683; all regions closed. 
2024-12-04T15:52:27,249 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,249 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,249 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,249 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,249 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,249 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,249 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,249 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,250 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741836_1012 (size=2751) 2024-12-04T15:52:27,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741834_1010 (size=1298) 2024-12-04T15:52:27,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741834_1010 (size=1298) 2024-12-04T15:52:27,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741834_1010 (size=1298) 2024-12-04T15:52:27,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741836_1012 (size=2751) 2024-12-04T15:52:27,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741836_1012 (size=2751) 2024-12-04T15:52:27,257 DEBUG [RS:0;a21b6491b371:44623 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/oldWALs 2024-12-04T15:52:27,257 INFO [RS:0;a21b6491b371:44623 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a21b6491b371%2C44623%2C1733327544652:(num 1733327545154) 2024-12-04T15:52:27,257 DEBUG [RS:0;a21b6491b371:44623 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:27,257 INFO [RS:0;a21b6491b371:44623 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T15:52:27,257 DEBUG [RS:1;a21b6491b371:38269 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/oldWALs 2024-12-04T15:52:27,257 INFO [RS:1;a21b6491b371:38269 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a21b6491b371%2C38269%2C1733327544683.meta:.meta(num 1733327545578) 2024-12-04T15:52:27,257 INFO [RS:0;a21b6491b371:44623 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T15:52:27,258 INFO [RS:0;a21b6491b371:44623 {}] hbase.ChoreService(370): Chore service for: regionserver/a21b6491b371:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T15:52:27,258 INFO [RS:0;a21b6491b371:44623 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T15:52:27,258 INFO [regionserver/a21b6491b371:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T15:52:27,258 INFO [RS:0;a21b6491b371:44623 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T15:52:27,258 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,258 INFO [RS:0;a21b6491b371:44623 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-04T15:52:27,258 INFO [RS:0;a21b6491b371:44623 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T15:52:27,258 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,258 INFO [RS:0;a21b6491b371:44623 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44623 2024-12-04T15:52:27,258 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,259 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,259 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a21b6491b371,44623,1733327544652 2024-12-04T15:52:27,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T15:52:27,260 INFO [RS:0;a21b6491b371:44623 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T15:52:27,261 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a21b6491b371,44623,1733327544652] 2024-12-04T15:52:27,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741835_1011 (size=93) 2024-12-04T15:52:27,263 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a21b6491b371,44623,1733327544652 already deleted, retry=false 2024-12-04T15:52:27,263 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a21b6491b371,44623,1733327544652 expired; onlineServers=1 2024-12-04T15:52:27,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741835_1011 (size=93) 2024-12-04T15:52:27,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741835_1011 (size=93) 2024-12-04T15:52:27,266 DEBUG [RS:1;a21b6491b371:38269 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/oldWALs 2024-12-04T15:52:27,266 INFO [RS:1;a21b6491b371:38269 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a21b6491b371%2C38269%2C1733327544683:(num 1733327545166) 2024-12-04T15:52:27,266 DEBUG [RS:1;a21b6491b371:38269 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T15:52:27,266 INFO [RS:1;a21b6491b371:38269 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T15:52:27,266 INFO [RS:1;a21b6491b371:38269 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T15:52:27,266 INFO [RS:1;a21b6491b371:38269 {}] hbase.ChoreService(370): Chore service for: regionserver/a21b6491b371:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, 
unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T15:52:27,266 INFO [RS:1;a21b6491b371:38269 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T15:52:27,266 INFO [regionserver/a21b6491b371:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T15:52:27,266 INFO [RS:1;a21b6491b371:38269 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38269 2024-12-04T15:52:27,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a21b6491b371,38269,1733327544683 2024-12-04T15:52:27,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T15:52:27,269 INFO [RS:1;a21b6491b371:38269 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T15:52:27,270 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a21b6491b371,38269,1733327544683] 2024-12-04T15:52:27,271 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a21b6491b371,38269,1733327544683 already deleted, retry=false 2024-12-04T15:52:27,272 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a21b6491b371,38269,1733327544683 expired; onlineServers=0 2024-12-04T15:52:27,272 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a21b6491b371,35791,1733327544603' ***** 2024-12-04T15:52:27,272 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T15:52:27,272 INFO [M:0;a21b6491b371:35791 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T15:52:27,272 INFO [M:0;a21b6491b371:35791 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T15:52:27,272 DEBUG [M:0;a21b6491b371:35791 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T15:52:27,272 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-04T15:52:27,272 DEBUG [M:0;a21b6491b371:35791 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T15:52:27,272 DEBUG [master/a21b6491b371:0:becomeActiveMaster-HFileCleaner.large.0-1733327544916 {}] cleaner.HFileCleaner(306): Exit Thread[master/a21b6491b371:0:becomeActiveMaster-HFileCleaner.large.0-1733327544916,5,FailOnTimeoutGroup] 2024-12-04T15:52:27,272 DEBUG [master/a21b6491b371:0:becomeActiveMaster-HFileCleaner.small.0-1733327544916 {}] cleaner.HFileCleaner(306): Exit Thread[master/a21b6491b371:0:becomeActiveMaster-HFileCleaner.small.0-1733327544916,5,FailOnTimeoutGroup] 2024-12-04T15:52:27,272 INFO [M:0;a21b6491b371:35791 {}] hbase.ChoreService(370): Chore service for: master/a21b6491b371:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T15:52:27,272 INFO [M:0;a21b6491b371:35791 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T15:52:27,273 DEBUG [M:0;a21b6491b371:35791 {}] master.HMaster(1795): Stopping service threads 2024-12-04T15:52:27,273 INFO [M:0;a21b6491b371:35791 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T15:52:27,273 INFO [M:0;a21b6491b371:35791 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T15:52:27,273 INFO [M:0;a21b6491b371:35791 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T15:52:27,273 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-04T15:52:27,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T15:52:27,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T15:52:27,274 DEBUG [M:0;a21b6491b371:35791 {}] zookeeper.ZKUtil(347): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T15:52:27,274 WARN [M:0;a21b6491b371:35791 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T15:52:27,274 INFO [M:0;a21b6491b371:35791 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/.lastflushedseqids 2024-12-04T15:52:27,277 WARN [IPC Server handler 1 on default port 41963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T15:52:27,277 WARN [IPC Server handler 1 on default port 41963 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], 
creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T15:52:27,277 WARN [IPC Server handler 1 on default port 41963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T15:52:27,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741843_1019 (size=127) 2024-12-04T15:52:27,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741843_1019 (size=127) 2024-12-04T15:52:27,283 INFO [M:0;a21b6491b371:35791 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T15:52:27,283 INFO [M:0;a21b6491b371:35791 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T15:52:27,283 DEBUG [M:0;a21b6491b371:35791 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T15:52:27,283 INFO [M:0;a21b6491b371:35791 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:52:27,283 DEBUG [M:0;a21b6491b371:35791 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:52:27,283 DEBUG [M:0;a21b6491b371:35791 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T15:52:27,283 DEBUG [M:0;a21b6491b371:35791 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T15:52:27,283 INFO [M:0;a21b6491b371:35791 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-04T15:52:27,301 DEBUG [M:0;a21b6491b371:35791 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/408469741230490db4d08a56c79a1900 is 82, key is hbase:meta,,1/info:regioninfo/1733327545623/Put/seqid=0 2024-12-04T15:52:27,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741844_1020 (size=5672) 2024-12-04T15:52:27,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741844_1020 (size=5672) 2024-12-04T15:52:27,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741844_1020 (size=5672) 2024-12-04T15:52:27,309 INFO [M:0;a21b6491b371:35791 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/408469741230490db4d08a56c79a1900 2024-12-04T15:52:27,333 DEBUG [M:0;a21b6491b371:35791 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c6e6dd3b50244fc1a1fe712586801784 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733327546156/Put/seqid=0 2024-12-04T15:52:27,334 WARN [IPC Server handler 2 on default port 41963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T15:52:27,334 WARN [IPC Server handler 2 on default port 41963 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T15:52:27,335 WARN [IPC Server handler 2 on default port 41963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T15:52:27,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741845_1021 (size=6438) 2024-12-04T15:52:27,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 
is added to blk_1073741845_1021 (size=6438) 2024-12-04T15:52:27,341 INFO [M:0;a21b6491b371:35791 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c6e6dd3b50244fc1a1fe712586801784 2024-12-04T15:52:27,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:27,362 INFO [RS:0;a21b6491b371:44623 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T15:52:27,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44623-0x1017e2d4b980001, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:27,362 INFO [RS:0;a21b6491b371:44623 {}] regionserver.HRegionServer(1031): Exiting; stopping=a21b6491b371,44623,1733327544652; zookeeper connection closed. 2024-12-04T15:52:27,363 DEBUG [M:0;a21b6491b371:35791 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/296a8e4c1ea046d996fc0ad42492d109 is 69, key is a21b6491b371,38269,1733327544683/rs:state/1733327544972/Put/seqid=0 2024-12-04T15:52:27,363 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@23b1cafd {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@23b1cafd 2024-12-04T15:52:27,364 WARN [IPC Server handler 3 on default port 41963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T15:52:27,364 WARN [IPC Server handler 3 on default port 41963 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T15:52:27,364 WARN [IPC Server handler 3 on default port 41963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T15:52:27,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741846_1022 (size=5294) 2024-12-04T15:52:27,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741846_1022 (size=5294) 
2024-12-04T15:52:27,369 INFO [M:0;a21b6491b371:35791 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/296a8e4c1ea046d996fc0ad42492d109 2024-12-04T15:52:27,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:27,371 INFO [RS:1;a21b6491b371:38269 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T15:52:27,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x1017e2d4b980002, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:27,371 INFO [RS:1;a21b6491b371:38269 {}] regionserver.HRegionServer(1031): Exiting; stopping=a21b6491b371,38269,1733327544683; zookeeper connection closed. 2024-12-04T15:52:27,371 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@33ae791a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@33ae791a 2024-12-04T15:52:27,372 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-04T15:52:27,376 DEBUG [M:0;a21b6491b371:35791 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/408469741230490db4d08a56c79a1900 as hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/408469741230490db4d08a56c79a1900 2024-12-04T15:52:27,382 INFO [M:0;a21b6491b371:35791 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/408469741230490db4d08a56c79a1900, entries=8, sequenceid=72, filesize=5.5 K 2024-12-04T15:52:27,384 DEBUG [M:0;a21b6491b371:35791 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c6e6dd3b50244fc1a1fe712586801784 as hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c6e6dd3b50244fc1a1fe712586801784 2024-12-04T15:52:27,389 INFO [M:0;a21b6491b371:35791 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c6e6dd3b50244fc1a1fe712586801784, entries=8, sequenceid=72, filesize=6.3 K 2024-12-04T15:52:27,390 DEBUG [M:0;a21b6491b371:35791 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/296a8e4c1ea046d996fc0ad42492d109 as 
hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/296a8e4c1ea046d996fc0ad42492d109 2024-12-04T15:52:27,397 INFO [M:0;a21b6491b371:35791 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41963/user/jenkins/test-data/14cf8f91-458c-f70a-e415-22f2de810d37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/296a8e4c1ea046d996fc0ad42492d109, entries=3, sequenceid=72, filesize=5.2 K 2024-12-04T15:52:27,398 INFO [M:0;a21b6491b371:35791 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=72, compaction requested=false 2024-12-04T15:52:27,399 INFO [M:0;a21b6491b371:35791 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T15:52:27,399 DEBUG [M:0;a21b6491b371:35791 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733327547283Disabling compacts and flushes for region at 1733327547283Disabling writes for close at 1733327547283Obtaining lock to block concurrent updates at 1733327547283Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733327547283Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733327547284 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733327547285 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733327547285Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733327547301 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733327547301Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733327547316 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733327547333 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733327547333Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733327547347 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733327547362 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733327547362Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@620d4bb: reopening flushed file at 1733327547375 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ae29fa7: reopening flushed file at 1733327547383 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76e7e983: reopening flushed file at 1733327547389 (+6 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=72, compaction requested=false at 1733327547398 (+9 ms)Writing region close event to WAL at 1733327547399 (+1 ms)Closed at 1733327547399 2024-12-04T15:52:27,400 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,400 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,400 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,400 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,400 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T15:52:27,402 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44907 is added to blk_1073741830_1006 (size=32662) 2024-12-04T15:52:27,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33377 is added to blk_1073741830_1006 (size=32662) 2024-12-04T15:52:27,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44125 is added to blk_1073741830_1006 (size=32662) 2024-12-04T15:52:27,403 INFO [M:0;a21b6491b371:35791 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-04T15:52:27,403 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T15:52:27,404 INFO [M:0;a21b6491b371:35791 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35791 2024-12-04T15:52:27,404 INFO [M:0;a21b6491b371:35791 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T15:52:27,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:27,508 INFO [M:0;a21b6491b371:35791 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T15:52:27,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35791-0x1017e2d4b980000, quorum=127.0.0.1:57411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T15:52:27,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49ebd318{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T15:52:27,510 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5298b047{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T15:52:27,511 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T15:52:27,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23f0b9cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T15:52:27,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a9a00ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/hadoop.log.dir/,STOPPED} 2024-12-04T15:52:27,512 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T15:52:27,512 WARN [BP-656230515-172.17.0.2-1733327543740 heartbeating to localhost/127.0.0.1:41963 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-04T15:52:27,512 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-04T15:52:27,512 WARN [BP-656230515-172.17.0.2-1733327543740 heartbeating to localhost/127.0.0.1:41963 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-656230515-172.17.0.2-1733327543740 (Datanode Uuid d113f785-32c4-438a-adec-08bdbf381286) service to localhost/127.0.0.1:41963
2024-12-04T15:52:27,513 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/data/data5/current/BP-656230515-172.17.0.2-1733327543740 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T15:52:27,513 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/data/data6/current/BP-656230515-172.17.0.2-1733327543740 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T15:52:27,514 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-04T15:52:27,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b6c5f5d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-04T15:52:27,517 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@62ba6a25{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-04T15:52:27,517 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-04T15:52:27,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cb78ad1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-04T15:52:27,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47448499{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/hadoop.log.dir/,STOPPED}
2024-12-04T15:52:27,519 WARN [BP-656230515-172.17.0.2-1733327543740 heartbeating to localhost/127.0.0.1:41963 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-04T15:52:27,519 WARN [BP-656230515-172.17.0.2-1733327543740 heartbeating to localhost/127.0.0.1:41963 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-656230515-172.17.0.2-1733327543740 (Datanode Uuid a7ed0b89-1885-4902-861f-0903e48f53fb) service to localhost/127.0.0.1:41963
2024-12-04T15:52:27,520 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/data/data3/current/BP-656230515-172.17.0.2-1733327543740 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T15:52:27,520 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/data/data4/current/BP-656230515-172.17.0.2-1733327543740 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T15:52:27,520 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-04T15:52:27,520 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-04T15:52:27,520 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-04T15:52:27,525 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@29e2a23f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-04T15:52:27,525 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4438aaf{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-04T15:52:27,525 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-04T15:52:27,525 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39d5f2c6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-04T15:52:27,526 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cec6057{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/hadoop.log.dir/,STOPPED}
2024-12-04T15:52:27,531 WARN [BP-656230515-172.17.0.2-1733327543740 heartbeating to localhost/127.0.0.1:41963 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-04T15:52:27,531 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-04T15:52:27,531 WARN [BP-656230515-172.17.0.2-1733327543740 heartbeating to localhost/127.0.0.1:41963 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-656230515-172.17.0.2-1733327543740 (Datanode Uuid a28e23c7-0f19-40ec-a840-0e817ce154a5) service to localhost/127.0.0.1:41963
2024-12-04T15:52:27,531 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-04T15:52:27,532 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/data/data1/current/BP-656230515-172.17.0.2-1733327543740 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T15:52:27,532 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/cluster_96c775b1-ed2f-6758-0f30-956f656d285b/data/data2/current/BP-656230515-172.17.0.2-1733327543740 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T15:52:27,532 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-04T15:52:27,538 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@22524515{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-04T15:52:27,539 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c2ad186{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-04T15:52:27,539 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-04T15:52:27,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40d3d129{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-04T15:52:27,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@309f022b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/711447d9-579e-5d5a-6b73-9d10e101bda2/hadoop.log.dir/,STOPPED}
2024-12-04T15:52:27,547 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-04T15:52:27,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-04T15:52:27,582 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=152 (was 90) - Thread LEAK? -, OpenFileDescriptor=520 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=167 (was 181), ProcessCount=11 (was 11), AvailableMemoryMB=9410 (was 9576)