2024-12-11 09:50:44,713 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-11 09:50:44,726 main DEBUG Took 0.011707 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-11 09:50:44,727 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-11 09:50:44,727 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-11 09:50:44,728 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-11 09:50:44,730 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 09:50:44,745 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-11 09:50:44,756 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,757 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 09:50:44,758 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,758 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 09:50:44,759 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,759 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 09:50:44,760 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,760 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 09:50:44,760 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,761 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 09:50:44,761 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,761 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 09:50:44,762 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,762 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-11 09:50:44,763 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,763 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 09:50:44,763 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,763 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 09:50:44,764 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,764 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 09:50:44,764 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,765 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 09:50:44,765 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,765 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 09:50:44,766 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,766 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-11 09:50:44,767 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 09:50:44,768 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-11 09:50:44,770 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-11 09:50:44,771 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-11 09:50:44,773 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-11 09:50:44,774 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-11 09:50:44,784 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-11 09:50:44,787 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-11 09:50:44,788 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-11 09:50:44,789 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-11 09:50:44,790 main DEBUG createAppenders(={Console}) 2024-12-11 09:50:44,791 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-11 09:50:44,791 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-11 09:50:44,791 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-11 09:50:44,792 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-11 09:50:44,793 main DEBUG OutputStream closed 2024-12-11 09:50:44,793 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-11 09:50:44,793 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-11 09:50:44,794 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-11 09:50:44,876 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-11 09:50:44,878 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-11 09:50:44,879 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-11 09:50:44,880 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-11 09:50:44,881 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-11 09:50:44,881 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-11 09:50:44,882 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-11 09:50:44,882 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-11 09:50:44,883 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-11 09:50:44,883 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-11 09:50:44,884 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-11 09:50:44,884 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-11 09:50:44,884 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-11 09:50:44,885 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-11 09:50:44,885 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-11 09:50:44,885 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-11 09:50:44,885 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-11 09:50:44,886 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-11 09:50:44,888 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-11 09:50:44,888 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-11 09:50:44,889 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-11 09:50:44,889 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-11T09:50:44,904 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-11 09:50:44,907 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-11 09:50:44,907 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-11T09:50:45,189 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e 2024-12-11T09:50:45,212 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04, deleteOnExit=true 2024-12-11T09:50:45,214 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/test.cache.data in system properties and HBase conf 2024-12-11T09:50:45,214 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T09:50:45,215 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/hadoop.log.dir in system properties and HBase conf 2024-12-11T09:50:45,216 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T09:50:45,216 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T09:50:45,216 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T09:50:45,308 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-11T09:50:45,414 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-11T09:50:45,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T09:50:45,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T09:50:45,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T09:50:45,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T09:50:45,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T09:50:45,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T09:50:45,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T09:50:45,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T09:50:45,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T09:50:45,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/nfs.dump.dir in system properties and HBase conf 2024-12-11T09:50:45,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/java.io.tmpdir in system properties and HBase conf 2024-12-11T09:50:45,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T09:50:45,425 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T09:50:45,425 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T09:50:46,513 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-11T09:50:46,599 INFO [Time-limited test {}] log.Log(170): Logging initialized @2508ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-11T09:50:46,680 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T09:50:46,752 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T09:50:46,773 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T09:50:46,774 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T09:50:46,775 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T09:50:46,787 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T09:50:46,790 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@346b353e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/hadoop.log.dir/,AVAILABLE} 2024-12-11T09:50:46,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2566da3f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T09:50:46,964 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44270346{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/java.io.tmpdir/jetty-localhost-34055-hadoop-hdfs-3_4_1-tests_jar-_-any-1047245157561762853/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T09:50:46,971 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11292817{HTTP/1.1, (http/1.1)}{localhost:34055} 2024-12-11T09:50:46,971 INFO [Time-limited test {}] server.Server(415): Started @2881ms 2024-12-11T09:50:47,435 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T09:50:47,444 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T09:50:47,445 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T09:50:47,445 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T09:50:47,446 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T09:50:47,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7acd62f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/hadoop.log.dir/,AVAILABLE} 2024-12-11T09:50:47,448 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a55babc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T09:50:47,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a2119f0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/java.io.tmpdir/jetty-localhost-35609-hadoop-hdfs-3_4_1-tests_jar-_-any-17494060223861697864/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T09:50:47,546 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5f8b16a4{HTTP/1.1, (http/1.1)}{localhost:35609} 2024-12-11T09:50:47,546 INFO [Time-limited test {}] server.Server(415): Started @3457ms 2024-12-11T09:50:47,596 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T09:50:47,709 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T09:50:47,716 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T09:50:47,724 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T09:50:47,724 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T09:50:47,724 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T09:50:47,727 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f372e8e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/hadoop.log.dir/,AVAILABLE} 2024-12-11T09:50:47,728 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56f2bf79{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T09:50:47,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26ca39fe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/java.io.tmpdir/jetty-localhost-44255-hadoop-hdfs-3_4_1-tests_jar-_-any-12749448718112025373/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T09:50:47,858 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ad305cc{HTTP/1.1, (http/1.1)}{localhost:44255} 2024-12-11T09:50:47,859 INFO [Time-limited test {}] server.Server(415): Started @3769ms 2024-12-11T09:50:47,861 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T09:50:47,900 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T09:50:47,903 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T09:50:47,904 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T09:50:47,905 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T09:50:47,905 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T09:50:47,906 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cf3d394{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/hadoop.log.dir/,AVAILABLE} 2024-12-11T09:50:47,906 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1646e48a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T09:50:48,001 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4fcce8ef{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/java.io.tmpdir/jetty-localhost-44873-hadoop-hdfs-3_4_1-tests_jar-_-any-16976407404514980465/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T09:50:48,002 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a241def{HTTP/1.1, (http/1.1)}{localhost:44873} 2024-12-11T09:50:48,002 INFO [Time-limited test {}] server.Server(415): Started @3912ms 2024-12-11T09:50:48,005 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-11T09:50:48,813 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/data/data4/current/BP-1086906460-172.17.0.2-1733910646032/current, will proceed with Du for space computation calculation, 2024-12-11T09:50:48,813 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/data/data1/current/BP-1086906460-172.17.0.2-1733910646032/current, will proceed with Du for space computation calculation, 2024-12-11T09:50:48,813 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/data/data3/current/BP-1086906460-172.17.0.2-1733910646032/current, will proceed with Du for space computation calculation, 2024-12-11T09:50:48,813 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/data/data2/current/BP-1086906460-172.17.0.2-1733910646032/current, will proceed with Du for space computation calculation, 2024-12-11T09:50:48,835 WARN [Thread-132 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/data/data5/current/BP-1086906460-172.17.0.2-1733910646032/current, will proceed with Du for space computation calculation, 2024-12-11T09:50:48,835 WARN [Thread-133 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/data/data6/current/BP-1086906460-172.17.0.2-1733910646032/current, will proceed with Du for space computation calculation, 2024-12-11T09:50:48,860 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T09:50:48,860 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T09:50:48,866 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T09:50:48,910 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb047bb7aee766fb1 with lease ID 0xd220c7cabed3a5e3: Processing first storage report for DS-579cfa22-dfa0-4809-80fe-88882a01644b from datanode DatanodeRegistration(127.0.0.1:41951, datanodeUuid=ffd97ac0-20fd-4145-af29-8c16f89a6d6e, infoPort=38753, infoSecurePort=0, ipcPort=42089, storageInfo=lv=-57;cid=testClusterID;nsid=2127042242;c=1733910646032) 2024-12-11T09:50:48,912 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb047bb7aee766fb1 with lease ID 0xd220c7cabed3a5e3: from storage DS-579cfa22-dfa0-4809-80fe-88882a01644b node DatanodeRegistration(127.0.0.1:41951, datanodeUuid=ffd97ac0-20fd-4145-af29-8c16f89a6d6e, infoPort=38753, infoSecurePort=0, ipcPort=42089, storageInfo=lv=-57;cid=testClusterID;nsid=2127042242;c=1733910646032), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-11T09:50:48,912 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f806fb7cfe4292d with lease ID 0xd220c7cabed3a5e4: Processing first storage report for DS-ba2bc2cd-6d41-4b86-8ac0-1c2621f21ea7 from datanode DatanodeRegistration(127.0.0.1:44455, datanodeUuid=8bd095ef-413b-445d-a8ff-fe1511ae1d57, infoPort=45321, infoSecurePort=0, ipcPort=46513, storageInfo=lv=-57;cid=testClusterID;nsid=2127042242;c=1733910646032) 2024-12-11T09:50:48,912 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f806fb7cfe4292d with lease ID 0xd220c7cabed3a5e4: from storage DS-ba2bc2cd-6d41-4b86-8ac0-1c2621f21ea7 node DatanodeRegistration(127.0.0.1:44455, datanodeUuid=8bd095ef-413b-445d-a8ff-fe1511ae1d57, infoPort=45321, infoSecurePort=0, ipcPort=46513, storageInfo=lv=-57;cid=testClusterID;nsid=2127042242;c=1733910646032), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T09:50:48,913 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa735c766c99d0aac with lease ID 0xd220c7cabed3a5e2: Processing first storage report for DS-a6fa4e0c-76fa-4357-8e30-177ad2fb02ae from datanode DatanodeRegistration(127.0.0.1:46813, datanodeUuid=0f6b940d-636d-414c-8460-b06df22cc5cc, infoPort=42503, infoSecurePort=0, ipcPort=36925, storageInfo=lv=-57;cid=testClusterID;nsid=2127042242;c=1733910646032) 2024-12-11T09:50:48,913 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa735c766c99d0aac with lease ID 0xd220c7cabed3a5e2: from storage DS-a6fa4e0c-76fa-4357-8e30-177ad2fb02ae node DatanodeRegistration(127.0.0.1:46813, datanodeUuid=0f6b940d-636d-414c-8460-b06df22cc5cc, infoPort=42503, infoSecurePort=0, ipcPort=36925, storageInfo=lv=-57;cid=testClusterID;nsid=2127042242;c=1733910646032), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T09:50:48,913 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb047bb7aee766fb1 with lease ID 0xd220c7cabed3a5e3: Processing first storage report for DS-b4b45af7-b446-4f91-9c21-5f4c5c86253b from datanode DatanodeRegistration(127.0.0.1:41951, datanodeUuid=ffd97ac0-20fd-4145-af29-8c16f89a6d6e, infoPort=38753, infoSecurePort=0, ipcPort=42089, storageInfo=lv=-57;cid=testClusterID;nsid=2127042242;c=1733910646032) 2024-12-11T09:50:48,913 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xb047bb7aee766fb1 with lease ID 0xd220c7cabed3a5e3: from storage DS-b4b45af7-b446-4f91-9c21-5f4c5c86253b node DatanodeRegistration(127.0.0.1:41951, datanodeUuid=ffd97ac0-20fd-4145-af29-8c16f89a6d6e, infoPort=38753, infoSecurePort=0, ipcPort=42089, storageInfo=lv=-57;cid=testClusterID;nsid=2127042242;c=1733910646032), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T09:50:48,913 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f806fb7cfe4292d with lease ID 0xd220c7cabed3a5e4: Processing first storage report for DS-55cfaf45-710e-4d66-9f22-9db9555f3c2e from datanode DatanodeRegistration(127.0.0.1:44455, datanodeUuid=8bd095ef-413b-445d-a8ff-fe1511ae1d57, infoPort=45321, infoSecurePort=0, ipcPort=46513, storageInfo=lv=-57;cid=testClusterID;nsid=2127042242;c=1733910646032) 2024-12-11T09:50:48,914 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f806fb7cfe4292d with lease ID 0xd220c7cabed3a5e4: from storage DS-55cfaf45-710e-4d66-9f22-9db9555f3c2e node DatanodeRegistration(127.0.0.1:44455, datanodeUuid=8bd095ef-413b-445d-a8ff-fe1511ae1d57, infoPort=45321, infoSecurePort=0, ipcPort=46513, storageInfo=lv=-57;cid=testClusterID;nsid=2127042242;c=1733910646032), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T09:50:48,914 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa735c766c99d0aac with lease ID 0xd220c7cabed3a5e2: Processing first storage report for DS-f4f9d7c5-2964-4967-be00-d7b6a8d5ba9e from datanode DatanodeRegistration(127.0.0.1:46813, datanodeUuid=0f6b940d-636d-414c-8460-b06df22cc5cc, infoPort=42503, infoSecurePort=0, ipcPort=36925, storageInfo=lv=-57;cid=testClusterID;nsid=2127042242;c=1733910646032) 2024-12-11T09:50:48,914 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa735c766c99d0aac with lease ID 0xd220c7cabed3a5e2: from storage DS-f4f9d7c5-2964-4967-be00-d7b6a8d5ba9e node DatanodeRegistration(127.0.0.1:46813, datanodeUuid=0f6b940d-636d-414c-8460-b06df22cc5cc, infoPort=42503, infoSecurePort=0, ipcPort=36925, storageInfo=lv=-57;cid=testClusterID;nsid=2127042242;c=1733910646032), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T09:50:48,935 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e 2024-12-11T09:50:49,009 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-11T09:50:49,070 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=162, OpenFileDescriptor=393, MaxFileDescriptor=1048576, SystemLoadAverage=270, ProcessCount=11, AvailableMemoryMB=8402 2024-12-11T09:50:49,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-11T09:50:49,081 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-11T09:50:49,205 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/zookeeper_0, clientPort=60518, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-11T09:50:49,217 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60518 2024-12-11T09:50:49,230 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:49,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:49,324 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:49,324 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:49,370 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-366492811_22 at /127.0.0.1:40726 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:46813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40726 dst: /127.0.0.1:46813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:49,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46813 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-11T09:50:49,788 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T09:50:49,795 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9 with version=8 2024-12-11T09:50:49,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/hbase-staging 2024-12-11T09:50:49,876 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-11T09:50:50,106 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3e788d7781dc:0 server-side Connection retries=45 2024-12-11T09:50:50,114 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:50,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:50,119 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T09:50:50,119 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:50,119 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T09:50:50,241 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-11T09:50:50,297 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-11T09:50:50,305 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-11T09:50:50,309 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T09:50:50,330 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 71890 (auto-detected) 2024-12-11T09:50:50,331 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-11T09:50:50,347 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40323 2024-12-11T09:50:50,365 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40323 connecting to ZooKeeper ensemble=127.0.0.1:60518 2024-12-11T09:50:50,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:403230x0, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T09:50:50,484 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40323-0x100147ff5e00000 connected 2024-12-11T09:50:50,561 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:50,564 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:50,573 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:50,577 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9, hbase.cluster.distributed=false 2024-12-11T09:50:50,599 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T09:50:50,603 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40323 2024-12-11T09:50:50,603 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40323 2024-12-11T09:50:50,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40323 2024-12-11T09:50:50,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40323 2024-12-11T09:50:50,605 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40323 2024-12-11T09:50:50,701 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3e788d7781dc:0 server-side Connection retries=45 2024-12-11T09:50:50,702 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:50,702 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:50,703 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T09:50:50,703 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:50,703 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T09:50:50,705 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T09:50:50,707 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T09:50:50,708 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35681 2024-12-11T09:50:50,711 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35681 connecting to ZooKeeper ensemble=127.0.0.1:60518 2024-12-11T09:50:50,712 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:50,716 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:50,726 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:356810x0, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T09:50:50,727 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35681-0x100147ff5e00001 connected 2024-12-11T09:50:50,727 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:50,732 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T09:50:50,739 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T09:50:50,742 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T09:50:50,748 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T09:50:50,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35681 2024-12-11T09:50:50,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=35681 2024-12-11T09:50:50,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35681 2024-12-11T09:50:50,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35681 2024-12-11T09:50:50,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35681 2024-12-11T09:50:50,768 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3e788d7781dc:0 server-side Connection retries=45 2024-12-11T09:50:50,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:50,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:50,769 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T09:50:50,769 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:50,769 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T09:50:50,769 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T09:50:50,769 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T09:50:50,770 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44945 2024-12-11T09:50:50,771 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44945 connecting to ZooKeeper ensemble=127.0.0.1:60518 2024-12-11T09:50:50,772 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:50,775 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:50,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:449450x0, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T09:50:50,785 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:449450x0, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:50,786 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44945-0x100147ff5e00002 connected 2024-12-11T09:50:50,786 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, 
blockSize=64 KB 2024-12-11T09:50:50,787 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T09:50:50,788 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T09:50:50,790 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T09:50:50,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44945 2024-12-11T09:50:50,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44945 2024-12-11T09:50:50,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44945 2024-12-11T09:50:50,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44945 2024-12-11T09:50:50,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44945 2024-12-11T09:50:50,809 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3e788d7781dc:0 server-side Connection retries=45 2024-12-11T09:50:50,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:50,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:50,809 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T09:50:50,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:50,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T09:50:50,809 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T09:50:50,810 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T09:50:50,811 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38469 2024-12-11T09:50:50,812 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38469 connecting to ZooKeeper ensemble=127.0.0.1:60518 2024-12-11T09:50:50,814 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:50,816 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:50,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:384690x0, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T09:50:50,827 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:50,827 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38469-0x100147ff5e00003 connected 2024-12-11T09:50:50,828 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T09:50:50,829 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T09:50:50,830 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T09:50:50,832 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T09:50:50,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38469 2024-12-11T09:50:50,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38469 2024-12-11T09:50:50,835 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38469 2024-12-11T09:50:50,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38469 2024-12-11T09:50:50,841 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38469 2024-12-11T09:50:50,855 DEBUG [M:0;3e788d7781dc:40323 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3e788d7781dc:40323 2024-12-11T09:50:50,856 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3e788d7781dc,40323,1733910649947 2024-12-11T09:50:50,868 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:50,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:50,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
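[editor note] The ZKUtil(113) and ZKWatcher(609) entries above show each region server setting a watch on znodes such as /hbase/running before they exist and then receiving SyncConnected/NodeCreated events. The sketch below is illustrative only: it uses the plain Apache ZooKeeper client rather than HBase's internal ZKWatcher/ZKUtil helpers, the class name RunningZNodeWatchSketch is invented, and only the quorum address 127.0.0.1:60518 and the /hbase/running path are taken from the log.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

// Minimal sketch: register an existence watch on a znode that may not exist yet,
// so a later NodeCreated event for it is delivered to the watcher.
public class RunningZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event type=" + event.getType()
            + " state=" + event.getState() + " path=" + event.getPath());

    // Quorum address taken from the log above; adjust for your own ensemble.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60518", 30_000, watcher);

    // exists() returns null when the znode is absent, but the watch is still
    // registered, so the watcher fires once /hbase/running is created.
    Stat stat = zk.exists("/hbase/running", watcher);
    System.out.println("/hbase/running " + (stat == null ? "not yet created" : "exists"));

    Thread.sleep(60_000); // keep the session open long enough to observe events
    zk.close();
  }
}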
2024-12-11T09:50:50,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:50,871 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3e788d7781dc,40323,1733910649947 2024-12-11T09:50:50,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T09:50:50,901 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T09:50:50,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T09:50:50,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:50,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:50,901 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:50,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:50,902 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T09:50:50,904 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3e788d7781dc,40323,1733910649947 from backup master directory 2024-12-11T09:50:50,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:50,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:50,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3e788d7781dc,40323,1733910649947 
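[editor note] The entries above and immediately below trace the master creating its /hbase/backup-masters znode, claiming /hbase/master, deleting its backup entry, and registering as active. The following is a deliberately simplified sketch of the ephemeral-znode idea behind that handoff, not HBase's ActiveMasterManager logic: the class and method names are invented, it assumes the /hbase and /hbase/backup-masters base znodes already exist, and it collapses the multi-step flow into a single try-create-else-backup decision.

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Minimal sketch: whoever creates the ephemeral /hbase/master znode is active;
// everyone else parks under /hbase/backup-masters.
public class ActiveMasterSketch {
  static boolean tryBecomeActive(ZooKeeper zk, String serverName) throws Exception {
    byte[] data = serverName.getBytes(StandardCharsets.UTF_8);
    try {
      // Ephemeral: the znode disappears automatically if this session dies,
      // which is what lets a backup master take over later.
      zk.create("/hbase/master", data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      return true; // we are the active master
    } catch (KeeperException.NodeExistsException e) {
      // Someone else is already active; register as a backup instead.
      zk.create("/hbase/backup-masters/" + serverName, data,
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      return false;
    }
  }
}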
2024-12-11T09:50:50,916 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:50,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:50,917 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T09:50:50,917 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3e788d7781dc,40323,1733910649947 2024-12-11T09:50:50,919 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-11T09:50:50,920 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-11T09:50:50,979 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/hbase.id] with ID: 1bb11542-c30d-4b85-9a0c-1185c87ff497 2024-12-11T09:50:50,980 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/.tmp/hbase.id 2024-12-11T09:50:50,986 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:50,986 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:50,989 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-366492811_22 at /127.0.0.1:33240 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:44455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33240 dst: /127.0.0.1:44455 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:50,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44455 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-11T09:50:50,995 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T09:50:50,995 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/.tmp/hbase.id]:[hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/hbase.id] 2024-12-11T09:50:51,044 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:51,049 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-11T09:50:51,066 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-11T09:50:51,076 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,088 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:51,088 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:51,091 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-366492811_22 at /127.0.0.1:54702 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:41951:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54702 dst: /127.0.0.1:41951 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:51,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41951 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-11T09:50:51,097 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
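[editor note] The FSUtils(620/625/634) entries above describe the cluster ID being written to .tmp/hbase.id and then moved to hbase.id. The sketch below illustrates that write-then-rename pattern with the public Hadoop FileSystem API; it is not HBase's FSUtils code, the class and method names and the rootDir parameter are invented, and only the .tmp/hbase.id and hbase.id file names come from the log. Renames within a single HDFS filesystem are atomic, which is why readers never see a half-written cluster ID file.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch: write the content to a temporary file, then move it into place.
public class ClusterIdWriteSketch {
  static void writeClusterId(Configuration conf, Path rootDir, String clusterId)
      throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");

    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // rename() returns false instead of throwing on some failures, so check it.
    if (!fs.rename(tmp, target)) {
      throw new IOException("Failed to move " + tmp + " to " + target);
    }
  }
}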
2024-12-11T09:50:51,111 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T09:50:51,112 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T09:50:51,117 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T09:50:51,141 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:51,141 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:51,144 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-366492811_22 at /127.0.0.1:41480 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:46813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41480 dst: /127.0.0.1:46813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:51,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46813 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-11T09:50:51,150 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T09:50:51,163 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store 2024-12-11T09:50:51,177 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:51,178 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:51,181 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-366492811_22 at /127.0.0.1:54712 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41951:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54712 dst: /127.0.0.1:41951 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:51,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41951 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-11T09:50:51,186 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T09:50:51,189 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-11T09:50:51,192 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T09:50:51,193 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T09:50:51,193 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T09:50:51,193 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T09:50:51,194 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-11T09:50:51,195 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T09:50:51,195 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T09:50:51,196 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733910651193Disabling compacts and flushes for region at 1733910651193Disabling writes for close at 1733910651194 (+1 ms)Writing region close event to WAL at 1733910651195 (+1 ms)Closed at 1733910651195 2024-12-11T09:50:51,198 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/.initializing 2024-12-11T09:50:51,198 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/WALs/3e788d7781dc,40323,1733910649947 2024-12-11T09:50:51,205 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T09:50:51,219 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e788d7781dc%2C40323%2C1733910649947, suffix=, logDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/WALs/3e788d7781dc,40323,1733910649947, archiveDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/oldWALs, maxLogs=10 2024-12-11T09:50:51,255 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/WALs/3e788d7781dc,40323,1733910649947/3e788d7781dc%2C40323%2C1733910649947.1733910651225, exclude list is [], retry=0 2024-12-11T09:50:51,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:51,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46813,DS-a6fa4e0c-76fa-4357-8e30-177ad2fb02ae,DISK] 2024-12-11T09:50:51,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44455,DS-ba2bc2cd-6d41-4b86-8ac0-1c2621f21ea7,DISK] 2024-12-11T09:50:51,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41951,DS-579cfa22-dfa0-4809-80fe-88882a01644b,DISK] 2024-12-11T09:50:51,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-11T09:50:51,311 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/WALs/3e788d7781dc,40323,1733910649947/3e788d7781dc%2C40323%2C1733910649947.1733910651225 2024-12-11T09:50:51,312 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45321:45321),(127.0.0.1/127.0.0.1:42503:42503),(127.0.0.1/127.0.0.1:38753:38753)] 2024-12-11T09:50:51,313 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T09:50:51,313 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T09:50:51,315 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:51,316 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:51,348 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:51,369 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T09:50:51,372 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:51,374 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:51,374 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:51,377 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T09:50:51,377 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:51,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T09:50:51,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:51,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T09:50:51,380 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:51,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T09:50:51,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:51,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T09:50:51,384 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:51,385 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T09:50:51,385 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:51,388 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:51,389 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:51,394 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:51,394 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:51,397 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T09:50:51,400 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:51,405 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T09:50:51,406 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73163737, jitterRate=0.09022463858127594}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T09:50:51,412 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733910651326Initializing all the Stores at 1733910651328 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910651328Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733910651329 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733910651329Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733910651329Cleaning up temporary data from old regions at 1733910651394 (+65 ms)Region opened successfully at 1733910651412 (+18 ms) 2024-12-11T09:50:51,413 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T09:50:51,443 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14f3df11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e788d7781dc/172.17.0.2:0 2024-12-11T09:50:51,469 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-11T09:50:51,479 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T09:50:51,479 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T09:50:51,481 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T09:50:51,482 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-11T09:50:51,487 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-11T09:50:51,487 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T09:50:51,509 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-11T09:50:51,516 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T09:50:51,559 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-11T09:50:51,564 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T09:50:51,567 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T09:50:51,576 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-11T09:50:51,578 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T09:50:51,582 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T09:50:51,591 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-11T09:50:51,592 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T09:50:51,601 DEBUG [master/3e788d7781dc:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T09:50:51,623 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T09:50:51,632 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T09:50:51,642 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:51,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:51,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:51,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:51,643 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,646 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3e788d7781dc,40323,1733910649947, sessionid=0x100147ff5e00000, setting cluster-up flag (Was=false) 2024-12-11T09:50:51,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
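[editor note] The ZKUtil(444) and RecoverableZooKeeper(212) entries above treat a missing znode such as /hbase/balancer or /hbase/switch/split as a normal answer ("not necessarily an error") rather than a failure. The sketch below shows that pattern with the plain ZooKeeper client; the class and method names are invented and it is not HBase's ZKUtil implementation.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

// Minimal sketch: read an optional flag znode, mapping "node does not exist" to null.
public class OptionalZNodeSketch {
  static byte[] getDataIfExists(ZooKeeper zk, String path) throws Exception {
    try {
      return zk.getData(path, false, null); // no watch, no Stat needed
    } catch (KeeperException.NoNodeException e) {
      return null; // absent znode simply means the switch was never set
    }
  }
}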
2024-12-11T09:50:51,668 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,693 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T09:50:51,697 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3e788d7781dc,40323,1733910649947 2024-12-11T09:50:51,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,718 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:51,743 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T09:50:51,746 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3e788d7781dc,40323,1733910649947 2024-12-11T09:50:51,756 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-11T09:50:51,821 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-11T09:50:51,829 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-11T09:50:51,835 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
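[editor note] The StochasticLoadBalancer(272) entry above lists its cost functions and reports the "sum of multiplier of cost functions". The snippet below is a simplified illustration of that weighted-sum idea only; it is not HBase's balancer code, and the function names and weights are placeholders rather than real configuration keys.

import java.util.LinkedHashMap;
import java.util.Map;

// Minimal sketch: each cost function yields a value in [0, 1] and is scaled by its
// multiplier; the balancer compares candidate cluster states by this total.
public class WeightedClusterCostSketch {
  static double totalCost(Map<String, Double> costs, Map<String, Double> multipliers) {
    double sum = 0.0;
    for (Map.Entry<String, Double> e : costs.entrySet()) {
      sum += multipliers.getOrDefault(e.getKey(), 0.0) * e.getValue();
    }
    return sum;
  }

  public static void main(String[] args) {
    Map<String, Double> costs = new LinkedHashMap<>();
    costs.put("regionCountSkew", 0.2);  // placeholder values
    costs.put("moveCost", 0.05);

    Map<String, Double> multipliers = new LinkedHashMap<>();
    multipliers.put("regionCountSkew", 500.0);
    multipliers.put("moveCost", 7.0);

    System.out.println("total cost = " + totalCost(costs, multipliers));
  }
}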
2024-12-11T09:50:51,839 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3e788d7781dc,40323,1733910649947 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T09:50:51,845 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.HRegionServer(746): ClusterId : 1bb11542-c30d-4b85-9a0c-1185c87ff497 2024-12-11T09:50:51,845 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(746): ClusterId : 1bb11542-c30d-4b85-9a0c-1185c87ff497 2024-12-11T09:50:51,845 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(746): ClusterId : 1bb11542-c30d-4b85-9a0c-1185c87ff497 2024-12-11T09:50:51,847 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3e788d7781dc:0, corePoolSize=5, maxPoolSize=5 2024-12-11T09:50:51,847 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3e788d7781dc:0, corePoolSize=5, maxPoolSize=5 2024-12-11T09:50:51,847 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3e788d7781dc:0, corePoolSize=5, maxPoolSize=5 2024-12-11T09:50:51,847 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3e788d7781dc:0, corePoolSize=5, maxPoolSize=5 2024-12-11T09:50:51,848 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3e788d7781dc:0, corePoolSize=10, maxPoolSize=10 2024-12-11T09:50:51,848 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:51,848 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T09:50:51,848 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T09:50:51,848 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T09:50:51,848 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3e788d7781dc:0, corePoolSize=2, maxPoolSize=2 2024-12-11T09:50:51,848 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:51,852 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733910681852 2024-12-11T09:50:51,854 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T09:50:51,855 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T09:50:51,855 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-11T09:50:51,855 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T09:50:51,860 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T09:50:51,860 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T09:50:51,861 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T09:50:51,861 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T09:50:51,861 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:51,862 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T09:50:51,862 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T09:50:51,862 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T09:50:51,862 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T09:50:51,862 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T09:50:51,862 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T09:50:51,862 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T09:50:51,862 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:51,866 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T09:50:51,868 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T09:50:51,868 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:51,868 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:51,869 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T09:50:51,869 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T09:50:51,870 DEBUG [RS:1;3e788d7781dc:44945 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52876f54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e788d7781dc/172.17.0.2:0 2024-12-11T09:50:51,870 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T09:50:51,871 DEBUG [RS:2;3e788d7781dc:38469 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3078c59c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e788d7781dc/172.17.0.2:0 2024-12-11T09:50:51,871 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T09:50:51,872 DEBUG [RS:0;3e788d7781dc:35681 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f48536, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e788d7781dc/172.17.0.2:0 2024-12-11T09:50:51,876 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T09:50:51,876 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T09:50:51,878 DEBUG [master/3e788d7781dc:0:becomeActiveMaster 
{}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3e788d7781dc:0:becomeActiveMaster-HFileCleaner.large.0-1733910651878,5,FailOnTimeoutGroup] 2024-12-11T09:50:51,883 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3e788d7781dc:0:becomeActiveMaster-HFileCleaner.small.0-1733910651879,5,FailOnTimeoutGroup] 2024-12-11T09:50:51,883 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:51,884 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T09:50:51,885 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:51,885 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:51,890 DEBUG [RS:1;3e788d7781dc:44945 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3e788d7781dc:44945 2024-12-11T09:50:51,892 DEBUG [RS:0;3e788d7781dc:35681 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3e788d7781dc:35681 2024-12-11T09:50:51,892 DEBUG [RS:2;3e788d7781dc:38469 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;3e788d7781dc:38469 2024-12-11T09:50:51,893 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T09:50:51,893 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T09:50:51,893 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T09:50:51,893 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T09:50:51,893 DEBUG [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T09:50:51,893 DEBUG [RS:1;3e788d7781dc:44945 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T09:50:51,894 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T09:50:51,894 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T09:50:51,894 DEBUG [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T09:50:51,895 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-366492811_22 at /127.0.0.1:41506 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:46813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41506 dst: /127.0.0.1:46813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:51,896 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.HRegionServer(2659): reportForDuty to master=3e788d7781dc,40323,1733910649947 with port=44945, startcode=1733910650767 2024-12-11T09:50:51,896 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(2659): reportForDuty to master=3e788d7781dc,40323,1733910649947 with port=38469, startcode=1733910650808 2024-12-11T09:50:51,896 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(2659): reportForDuty to master=3e788d7781dc,40323,1733910649947 with port=35681, startcode=1733910650669 2024-12-11T09:50:51,909 DEBUG [RS:0;3e788d7781dc:35681 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T09:50:51,909 DEBUG [RS:2;3e788d7781dc:38469 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T09:50:51,909 DEBUG [RS:1;3e788d7781dc:44945 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T09:50:51,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46813 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-11T09:50:51,914 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T09:50:51,916 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-11T09:50:51,916 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9 2024-12-11T09:50:51,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44455 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-11T09:50:51,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41951 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-11T09:50:51,937 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:51,937 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-11T09:50:51,950 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-366492811_22 at /127.0.0.1:33278 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:44455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33278 dst: /127.0.0.1:44455 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:51,958 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34223, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T09:50:51,958 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60075, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T09:50:51,958 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60829, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T09:50:51,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44455 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-11T09:50:51,961 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T09:50:51,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T09:50:51,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T09:50:51,966 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40323 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3e788d7781dc,35681,1733910650669 2024-12-11T09:50:51,969 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T09:50:51,969 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:51,969 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40323 {}] master.ServerManager(517): Registering regionserver=3e788d7781dc,35681,1733910650669 2024-12-11T09:50:51,970 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:51,970 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T09:50:51,973 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T09:50:51,974 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:51,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:51,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T09:50:51,978 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T09:50:51,978 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:51,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:51,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T09:50:51,982 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T09:50:51,983 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:51,984 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40323 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3e788d7781dc,44945,1733910650767 2024-12-11T09:50:51,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:51,984 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40323 {}] master.ServerManager(517): Registering regionserver=3e788d7781dc,44945,1733910650767 2024-12-11T09:50:51,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T09:50:51,987 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740 2024-12-11T09:50:51,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740 2024-12-11T09:50:51,988 DEBUG [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9 2024-12-11T09:50:51,989 DEBUG [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34879 2024-12-11T09:50:51,989 DEBUG [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T09:50:51,990 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40323 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3e788d7781dc,38469,1733910650808 2024-12-11T09:50:51,990 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40323 {}] master.ServerManager(517): Registering regionserver=3e788d7781dc,38469,1733910650808 2024-12-11T09:50:51,991 DEBUG [RS:1;3e788d7781dc:44945 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9 2024-12-11T09:50:51,991 DEBUG [RS:1;3e788d7781dc:44945 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34879 2024-12-11T09:50:51,991 DEBUG [RS:1;3e788d7781dc:44945 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T09:50:51,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T09:50:51,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T09:50:51,994 DEBUG [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9 2024-12-11T09:50:51,994 DEBUG [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34879 2024-12-11T09:50:51,994 DEBUG [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T09:50:51,995 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-11T09:50:51,998 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T09:50:52,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T09:50:52,003 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T09:50:52,004 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70307485, jitterRate=0.047663167119026184}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T09:50:52,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733910651963Initializing all the Stores at 1733910651965 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910651965Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910651965Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733910651965Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910651966 (+1 ms)Cleaning up temporary data from old regions at 1733910651993 (+27 ms)Region opened successfully at 1733910652006 (+13 ms) 2024-12-11T09:50:52,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T09:50:52,007 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T09:50:52,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T09:50:52,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T09:50:52,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T09:50:52,008 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 
2024-12-11T09:50:52,009 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733910652007Disabling compacts and flushes for region at 1733910652007Disabling writes for close at 1733910652007Writing region close event to WAL at 1733910652008 (+1 ms)Closed at 1733910652008 2024-12-11T09:50:52,011 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T09:50:52,011 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-11T09:50:52,017 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T09:50:52,023 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T09:50:52,026 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T09:50:52,028 DEBUG [RS:0;3e788d7781dc:35681 {}] zookeeper.ZKUtil(111): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3e788d7781dc,35681,1733910650669 2024-12-11T09:50:52,029 WARN [RS:0;3e788d7781dc:35681 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T09:50:52,029 INFO [RS:0;3e788d7781dc:35681 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T09:50:52,029 DEBUG [RS:2;3e788d7781dc:38469 {}] zookeeper.ZKUtil(111): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3e788d7781dc,38469,1733910650808 2024-12-11T09:50:52,029 WARN [RS:2;3e788d7781dc:38469 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-11T09:50:52,029 INFO [RS:2;3e788d7781dc:38469 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T09:50:52,029 DEBUG [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,35681,1733910650669 2024-12-11T09:50:52,029 DEBUG [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,38469,1733910650808 2024-12-11T09:50:52,030 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3e788d7781dc,35681,1733910650669] 2024-12-11T09:50:52,030 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3e788d7781dc,44945,1733910650767] 2024-12-11T09:50:52,030 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3e788d7781dc,38469,1733910650808] 2024-12-11T09:50:52,031 DEBUG [RS:1;3e788d7781dc:44945 {}] zookeeper.ZKUtil(111): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3e788d7781dc,44945,1733910650767 2024-12-11T09:50:52,031 WARN [RS:1;3e788d7781dc:44945 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T09:50:52,031 INFO [RS:1;3e788d7781dc:44945 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T09:50:52,031 DEBUG [RS:1;3e788d7781dc:44945 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,44945,1733910650767 2024-12-11T09:50:52,052 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T09:50:52,052 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T09:50:52,052 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T09:50:52,064 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T09:50:52,064 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T09:50:52,064 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T09:50:52,069 INFO [RS:2;3e788d7781dc:38469 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T09:50:52,069 INFO [RS:1;3e788d7781dc:44945 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T09:50:52,069 INFO [RS:0;3e788d7781dc:35681 {}] 
throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T09:50:52,069 INFO [RS:2;3e788d7781dc:38469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,069 INFO [RS:1;3e788d7781dc:44945 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,069 INFO [RS:0;3e788d7781dc:35681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,070 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T09:50:52,071 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T09:50:52,071 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T09:50:52,076 INFO [RS:1;3e788d7781dc:44945 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T09:50:52,076 INFO [RS:2;3e788d7781dc:38469 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T09:50:52,076 INFO [RS:0;3e788d7781dc:35681 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T09:50:52,077 INFO [RS:1;3e788d7781dc:44945 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,077 INFO [RS:2;3e788d7781dc:38469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,077 INFO [RS:0;3e788d7781dc:35681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-11T09:50:52,078 DEBUG [RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,078 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3e788d7781dc:0, corePoolSize=2, maxPoolSize=2 2024-12-11T09:50:52,078 DEBUG [RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3e788d7781dc:0, corePoolSize=2, maxPoolSize=2 2024-12-11T09:50:52,078 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3e788d7781dc:0, corePoolSize=2, maxPoolSize=2 2024-12-11T09:50:52,079 DEBUG 
[RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,079 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting 
executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:52,080 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3e788d7781dc:0, corePoolSize=3, maxPoolSize=3 2024-12-11T09:50:52,080 DEBUG [RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3e788d7781dc:0, corePoolSize=3, maxPoolSize=3 2024-12-11T09:50:52,080 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3e788d7781dc:0, corePoolSize=3, maxPoolSize=3 2024-12-11T09:50:52,080 DEBUG [RS:0;3e788d7781dc:35681 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0, corePoolSize=3, maxPoolSize=3 2024-12-11T09:50:52,080 DEBUG [RS:2;3e788d7781dc:38469 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0, corePoolSize=3, maxPoolSize=3 2024-12-11T09:50:52,080 DEBUG [RS:1;3e788d7781dc:44945 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0, corePoolSize=3, maxPoolSize=3 2024-12-11T09:50:52,085 INFO [RS:1;3e788d7781dc:44945 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:2;3e788d7781dc:38469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:0;3e788d7781dc:35681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:1;3e788d7781dc:44945 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:2;3e788d7781dc:38469 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:1;3e788d7781dc:44945 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:2;3e788d7781dc:38469 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:0;3e788d7781dc:35681 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:1;3e788d7781dc:44945 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:0;3e788d7781dc:35681 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:2;3e788d7781dc:38469 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:1;3e788d7781dc:44945 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:0;3e788d7781dc:35681 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-11T09:50:52,085 INFO [RS:2;3e788d7781dc:38469 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:1;3e788d7781dc:44945 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,44945,1733910650767-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:2;3e788d7781dc:38469 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,38469,1733910650808-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:0;3e788d7781dc:35681 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,085 INFO [RS:0;3e788d7781dc:35681 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,35681,1733910650669-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T09:50:52,108 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T09:50:52,108 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T09:50:52,108 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T09:50:52,111 INFO [RS:0;3e788d7781dc:35681 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,35681,1733910650669-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,111 INFO [RS:2;3e788d7781dc:38469 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,38469,1733910650808-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,111 INFO [RS:1;3e788d7781dc:44945 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,44945,1733910650767-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,111 INFO [RS:1;3e788d7781dc:44945 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,111 INFO [RS:0;3e788d7781dc:35681 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,111 INFO [RS:2;3e788d7781dc:38469 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,111 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.Replication(171): 3e788d7781dc,44945,1733910650767 started 2024-12-11T09:50:52,111 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.Replication(171): 3e788d7781dc,35681,1733910650669 started 2024-12-11T09:50:52,111 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.Replication(171): 3e788d7781dc,38469,1733910650808 started 2024-12-11T09:50:52,126 INFO [RS:0;3e788d7781dc:35681 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,127 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(1482): Serving as 3e788d7781dc,35681,1733910650669, RpcServer on 3e788d7781dc/172.17.0.2:35681, sessionid=0x100147ff5e00001 2024-12-11T09:50:52,127 INFO [RS:1;3e788d7781dc:44945 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T09:50:52,127 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.HRegionServer(1482): Serving as 3e788d7781dc,44945,1733910650767, RpcServer on 3e788d7781dc/172.17.0.2:44945, sessionid=0x100147ff5e00002 2024-12-11T09:50:52,128 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T09:50:52,128 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T09:50:52,128 DEBUG [RS:0;3e788d7781dc:35681 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3e788d7781dc,35681,1733910650669 2024-12-11T09:50:52,128 DEBUG [RS:1;3e788d7781dc:44945 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3e788d7781dc,44945,1733910650767 2024-12-11T09:50:52,128 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e788d7781dc,35681,1733910650669' 2024-12-11T09:50:52,128 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e788d7781dc,44945,1733910650767' 2024-12-11T09:50:52,128 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T09:50:52,128 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T09:50:52,129 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T09:50:52,129 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T09:50:52,129 INFO [RS:2;3e788d7781dc:38469 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T09:50:52,129 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(1482): Serving as 3e788d7781dc,38469,1733910650808, RpcServer on 3e788d7781dc/172.17.0.2:38469, sessionid=0x100147ff5e00003 2024-12-11T09:50:52,130 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T09:50:52,130 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T09:50:52,130 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T09:50:52,130 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T09:50:52,130 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T09:50:52,130 DEBUG [RS:2;3e788d7781dc:38469 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3e788d7781dc,38469,1733910650808 2024-12-11T09:50:52,130 DEBUG [RS:1;3e788d7781dc:44945 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3e788d7781dc,44945,1733910650767 2024-12-11T09:50:52,130 DEBUG [RS:0;3e788d7781dc:35681 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3e788d7781dc,35681,1733910650669 2024-12-11T09:50:52,130 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e788d7781dc,38469,1733910650808' 2024-12-11T09:50:52,130 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e788d7781dc,44945,1733910650767' 2024-12-11T09:50:52,130 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T09:50:52,130 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T09:50:52,130 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e788d7781dc,35681,1733910650669' 2024-12-11T09:50:52,130 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T09:50:52,130 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T09:50:52,130 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T09:50:52,131 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T09:50:52,131 DEBUG [RS:1;3e788d7781dc:44945 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T09:50:52,131 INFO [RS:1;3e788d7781dc:44945 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T09:50:52,131 DEBUG [RS:0;3e788d7781dc:35681 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T09:50:52,131 INFO [RS:0;3e788d7781dc:35681 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T09:50:52,131 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.RegionServerProcedureManagerHost(53): 
Procedure flush-table-proc started 2024-12-11T09:50:52,131 INFO [RS:1;3e788d7781dc:44945 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T09:50:52,131 INFO [RS:0;3e788d7781dc:35681 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T09:50:52,131 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T09:50:52,131 DEBUG [RS:2;3e788d7781dc:38469 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3e788d7781dc,38469,1733910650808 2024-12-11T09:50:52,131 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e788d7781dc,38469,1733910650808' 2024-12-11T09:50:52,131 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T09:50:52,132 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T09:50:52,132 DEBUG [RS:2;3e788d7781dc:38469 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T09:50:52,133 INFO [RS:2;3e788d7781dc:38469 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T09:50:52,133 INFO [RS:2;3e788d7781dc:38469 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T09:50:52,177 WARN [3e788d7781dc:40323 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-11T09:50:52,243 INFO [RS:1;3e788d7781dc:44945 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T09:50:52,243 INFO [RS:0;3e788d7781dc:35681 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T09:50:52,243 INFO [RS:2;3e788d7781dc:38469 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T09:50:52,246 INFO [RS:1;3e788d7781dc:44945 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e788d7781dc%2C44945%2C1733910650767, suffix=, logDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,44945,1733910650767, archiveDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/oldWALs, maxLogs=32 2024-12-11T09:50:52,246 INFO [RS:2;3e788d7781dc:38469 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e788d7781dc%2C38469%2C1733910650808, suffix=, logDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,38469,1733910650808, archiveDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/oldWALs, maxLogs=32 2024-12-11T09:50:52,246 INFO [RS:0;3e788d7781dc:35681 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e788d7781dc%2C35681%2C1733910650669, suffix=, logDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,35681,1733910650669, archiveDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/oldWALs, maxLogs=32 2024-12-11T09:50:52,261 DEBUG [RS:1;3e788d7781dc:44945 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,44945,1733910650767/3e788d7781dc%2C44945%2C1733910650767.1733910652249, exclude list is [], retry=0 2024-12-11T09:50:52,261 DEBUG [RS:0;3e788d7781dc:35681 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,35681,1733910650669/3e788d7781dc%2C35681%2C1733910650669.1733910652249, exclude list is [], retry=0 2024-12-11T09:50:52,262 DEBUG [RS:2;3e788d7781dc:38469 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,38469,1733910650808/3e788d7781dc%2C38469%2C1733910650808.1733910652249, exclude list is [], retry=0 2024-12-11T09:50:52,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46813,DS-a6fa4e0c-76fa-4357-8e30-177ad2fb02ae,DISK] 2024-12-11T09:50:52,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44455,DS-ba2bc2cd-6d41-4b86-8ac0-1c2621f21ea7,DISK] 2024-12-11T09:50:52,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46813,DS-a6fa4e0c-76fa-4357-8e30-177ad2fb02ae,DISK] 2024-12-11T09:50:52,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44455,DS-ba2bc2cd-6d41-4b86-8ac0-1c2621f21ea7,DISK] 2024-12-11T09:50:52,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41951,DS-579cfa22-dfa0-4809-80fe-88882a01644b,DISK] 2024-12-11T09:50:52,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44455,DS-ba2bc2cd-6d41-4b86-8ac0-1c2621f21ea7,DISK] 2024-12-11T09:50:52,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41951,DS-579cfa22-dfa0-4809-80fe-88882a01644b,DISK] 2024-12-11T09:50:52,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46813,DS-a6fa4e0c-76fa-4357-8e30-177ad2fb02ae,DISK] 2024-12-11T09:50:52,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL 
client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41951,DS-579cfa22-dfa0-4809-80fe-88882a01644b,DISK] 2024-12-11T09:50:52,290 INFO [RS:0;3e788d7781dc:35681 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,35681,1733910650669/3e788d7781dc%2C35681%2C1733910650669.1733910652249 2024-12-11T09:50:52,290 INFO [RS:1;3e788d7781dc:44945 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,44945,1733910650767/3e788d7781dc%2C44945%2C1733910650767.1733910652249 2024-12-11T09:50:52,292 INFO [RS:2;3e788d7781dc:38469 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,38469,1733910650808/3e788d7781dc%2C38469%2C1733910650808.1733910652249 2024-12-11T09:50:52,293 DEBUG [RS:0;3e788d7781dc:35681 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45321:45321),(127.0.0.1/127.0.0.1:42503:42503),(127.0.0.1/127.0.0.1:38753:38753)] 2024-12-11T09:50:52,295 DEBUG [RS:1;3e788d7781dc:44945 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42503:42503),(127.0.0.1/127.0.0.1:45321:45321),(127.0.0.1/127.0.0.1:38753:38753)] 2024-12-11T09:50:52,296 DEBUG [RS:2;3e788d7781dc:38469 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45321:45321),(127.0.0.1/127.0.0.1:42503:42503),(127.0.0.1/127.0.0.1:38753:38753)] 2024-12-11T09:50:52,430 DEBUG [3e788d7781dc:40323 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-11T09:50:52,439 DEBUG [3e788d7781dc:40323 {}] balancer.BalancerClusterState(204): Hosts are {3e788d7781dc=0} racks are {/default-rack=0} 2024-12-11T09:50:52,445 DEBUG [3e788d7781dc:40323 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T09:50:52,445 DEBUG [3e788d7781dc:40323 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T09:50:52,445 DEBUG [3e788d7781dc:40323 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T09:50:52,445 DEBUG [3e788d7781dc:40323 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T09:50:52,445 DEBUG [3e788d7781dc:40323 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T09:50:52,445 DEBUG [3e788d7781dc:40323 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T09:50:52,445 INFO [3e788d7781dc:40323 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T09:50:52,445 INFO [3e788d7781dc:40323 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T09:50:52,445 INFO [3e788d7781dc:40323 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T09:50:52,445 DEBUG [3e788d7781dc:40323 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T09:50:52,451 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3e788d7781dc,35681,1733910650669 2024-12-11T09:50:52,457 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3e788d7781dc,35681,1733910650669, state=OPENING 2024-12-11T09:50:52,501 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create 
it 2024-12-11T09:50:52,509 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:52,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:52,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:52,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:52,511 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:52,512 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:52,512 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:52,512 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:52,515 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T09:50:52,518 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3e788d7781dc,35681,1733910650669}] 2024-12-11T09:50:52,697 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T09:50:52,699 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49527, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T09:50:52,727 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-11T09:50:52,727 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T09:50:52,728 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-11T09:50:52,731 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e788d7781dc%2C35681%2C1733910650669.meta, suffix=.meta, logDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,35681,1733910650669, 
archiveDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/oldWALs, maxLogs=32 2024-12-11T09:50:52,748 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,35681,1733910650669/3e788d7781dc%2C35681%2C1733910650669.meta.1733910652732.meta, exclude list is [], retry=0 2024-12-11T09:50:52,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41951,DS-579cfa22-dfa0-4809-80fe-88882a01644b,DISK] 2024-12-11T09:50:52,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44455,DS-ba2bc2cd-6d41-4b86-8ac0-1c2621f21ea7,DISK] 2024-12-11T09:50:52,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46813,DS-a6fa4e0c-76fa-4357-8e30-177ad2fb02ae,DISK] 2024-12-11T09:50:52,757 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/WALs/3e788d7781dc,35681,1733910650669/3e788d7781dc%2C35681%2C1733910650669.meta.1733910652732.meta 2024-12-11T09:50:52,758 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38753:38753),(127.0.0.1/127.0.0.1:45321:45321),(127.0.0.1/127.0.0.1:42503:42503)] 2024-12-11T09:50:52,758 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T09:50:52,760 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T09:50:52,763 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T09:50:52,767 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
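[Editor's note] The wal.AbstractFSWAL(613) lines above ("WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32") and the AsyncFSWALProvider instantiation describe how each server's write-ahead log is set up. Below is a hedged sketch of the hbase-site.xml keys that, as far as I know, drive those numbers; the property names are assumptions about standard HBase configuration, not something this test sets explicitly, and the values simply mirror what was logged.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch, assuming the standard WAL-related configuration keys.
public class WalConfigSketch {
  public static Configuration walConfig() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "asyncfs");                              // AsyncFSWALProvider
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);  // blocksize=256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);           // roll at 50% => rollsize=128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                          // maxLogs=32
    return conf;
  }
}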
2024-12-11T09:50:52,771 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T09:50:52,771 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T09:50:52,771 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-11T09:50:52,771 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-11T09:50:52,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T09:50:52,776 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T09:50:52,776 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:52,777 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:52,777 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T09:50:52,779 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T09:50:52,779 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:52,780 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:52,780 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T09:50:52,781 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T09:50:52,781 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:52,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:52,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T09:50:52,784 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T09:50:52,784 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:52,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
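[Editor's note] The CompactionConfiguration(183) lines above dump the effective compaction settings for each column family of hbase:meta (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5). The sketch below maps a few of those numbers back to what I believe are the usual configuration keys; treat the key names as assumptions, the values just echo the logged defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the common compaction tuning knobs, assuming standard key names.
public class CompactionConfigSketch {
  public static Configuration compactionConfig() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                          // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                         // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                   // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);           // off-peak ratio
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);   // minCompactSize
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);              // major period, 7 days in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);            // major jitter
    return conf;
  }
}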
2024-12-11T09:50:52,785 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T09:50:52,786 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740 2024-12-11T09:50:52,789 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740 2024-12-11T09:50:52,791 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T09:50:52,791 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T09:50:52,792 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T09:50:52,794 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T09:50:52,795 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75461985, jitterRate=0.12447120249271393}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T09:50:52,795 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-11T09:50:52,796 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733910652772Writing region info on filesystem at 1733910652772Initializing all the Stores at 1733910652774 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910652774Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910652774Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733910652774Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910652774Cleaning up temporary data from old regions at 1733910652791 (+17 ms)Running coprocessor post-open hooks at 1733910652795 (+4 ms)Region opened successfully at 1733910652796 (+1 ms) 2024-12-11T09:50:52,803 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733910652686 2024-12-11T09:50:52,813 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T09:50:52,813 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-11T09:50:52,814 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3e788d7781dc,35681,1733910650669 2024-12-11T09:50:52,816 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3e788d7781dc,35681,1733910650669, state=OPEN 2024-12-11T09:50:52,876 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T09:50:52,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T09:50:52,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T09:50:52,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T09:50:52,877 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:52,877 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:52,877 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:52,877 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:52,878 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=3e788d7781dc,35681,1733910650669 2024-12-11T09:50:52,888 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T09:50:52,888 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3e788d7781dc,35681,1733910650669 in 360 msec 2024-12-11T09:50:52,894 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T09:50:52,895 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 873 msec 2024-12-11T09:50:52,897 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T09:50:52,897 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-11T09:50:52,913 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T09:50:52,914 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3e788d7781dc,35681,1733910650669, seqNum=-1] 2024-12-11T09:50:52,930 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T09:50:52,932 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40955, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T09:50:52,950 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1650 sec 2024-12-11T09:50:52,950 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733910652950, completionTime=-1 2024-12-11T09:50:52,952 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-11T09:50:52,953 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-11T09:50:52,979 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-11T09:50:52,979 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733910712979 2024-12-11T09:50:52,979 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733910772979 2024-12-11T09:50:52,979 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 26 msec 2024-12-11T09:50:52,981 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-11T09:50:52,987 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,40323,1733910649947-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,988 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,40323,1733910649947-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,988 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,40323,1733910649947-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,989 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3e788d7781dc:40323, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,990 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,990 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:52,996 DEBUG [master/3e788d7781dc:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T09:50:53,019 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.101sec 2024-12-11T09:50:53,020 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T09:50:53,021 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T09:50:53,022 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T09:50:53,022 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
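[Editor's note] The master finishes initialization above with quota support reported as disabled (MasterQuotaManager, and earlier RegionServerRpcQuotaManager/RegionServerSpaceQuotaManager) and a BalancerChore running every 300000 ms. The tiny sketch below shows the switches that, to my understanding, control those two behaviours; the key names are assumptions about standard HBase settings, since the test appears to run with defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch, assuming the usual master/quota configuration keys.
public class MasterTogglesSketch {
  public static Configuration withQuotasEnabled() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.quota.enabled", true);   // default false => "Quota support disabled"
    conf.setInt("hbase.balancer.period", 300000);   // BalancerChore period in ms, as logged above
    return conf;
  }
}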
2024-12-11T09:50:53,022 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T09:50:53,023 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,40323,1733910649947-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T09:50:53,023 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,40323,1733910649947-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T09:50:53,027 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-11T09:50:53,029 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T09:50:53,029 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,40323,1733910649947-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:53,057 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@611afac1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T09:50:53,061 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-11T09:50:53,061 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-11T09:50:53,064 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3e788d7781dc,40323,-1 for getting cluster id 2024-12-11T09:50:53,066 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T09:50:53,075 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1bb11542-c30d-4b85-9a0c-1185c87ff497' 2024-12-11T09:50:53,077 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T09:50:53,077 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1bb11542-c30d-4b85-9a0c-1185c87ff497" 2024-12-11T09:50:53,080 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a0dec0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T09:50:53,080 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3e788d7781dc,40323,-1] 2024-12-11T09:50:53,083 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T09:50:53,085 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:53,086 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35080, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-11T09:50:53,089 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34d9f045, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T09:50:53,090 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T09:50:53,096 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3e788d7781dc,35681,1733910650669, seqNum=-1] 2024-12-11T09:50:53,097 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T09:50:53,099 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58242, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T09:50:53,117 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3e788d7781dc,40323,1733910649947 2024-12-11T09:50:53,120 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T09:50:53,125 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 3e788d7781dc,40323,1733910649947 2024-12-11T09:50:53,127 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@f958a0d 2024-12-11T09:50:53,127 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T09:50:53,130 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35088, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T09:50:53,135 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40323 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T09:50:53,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40323 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-11T09:50:53,146 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T09:50:53,148 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40323 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-11T09:50:53,149 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:53,151 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T09:50:53,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T09:50:53,162 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:53,162 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:53,168 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-366492811_22 at /127.0.0.1:54800 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:41951:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54800 dst: /127.0.0.1:41951 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:53,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41951 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-11T09:50:53,175 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
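[Editor's note] The DFSStripedOutputStream warnings above come from writing under the erasure coding policy RS-3-2-1024k, which needs 3 data plus 2 parity blocks, i.e. at least 5 datanodes, while this mini cluster only has 3, so parity blocks 3 and 4 cannot be placed and each block group "failed to write 2 blocks". A hedged Java sketch for inspecting the policy on a directory follows; the path /hbase is hypothetical, and the snippet assumes fs.defaultFS points at HDFS. The CLI equivalents are 'hdfs ec -getPolicy -path <dir>' and the 'hdfs ec -verifyClusterSetup' command quoted in the warning itself.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Sketch: report how many datanodes the EC policy on a directory requires.
public class EcPolicyCheckSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    try (DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf)) {  // assumes an HDFS default filesystem
      Path dir = new Path("/hbase");                                  // hypothetical EC-enabled directory
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      if (policy != null) {
        System.out.println(policy.getName() + " needs at least "
            + (policy.getNumDataUnits() + policy.getNumParityUnits()) + " datanodes");
      }
    }
  }
}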
2024-12-11T09:50:53,177 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e0a9bcfe176438ff327dee8ed53f557b, NAME => 'TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9 2024-12-11T09:50:53,183 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:53,184 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:53,186 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-366492811_22 at /127.0.0.1:33334 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:44455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33334 dst: /127.0.0.1:44455 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:53,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44455 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-11T09:50:53,193 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T09:50:53,194 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T09:50:53,194 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing e0a9bcfe176438ff327dee8ed53f557b, disabling compactions & flushes 2024-12-11T09:50:53,194 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 2024-12-11T09:50:53,194 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 2024-12-11T09:50:53,194 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. after waiting 0 ms 2024-12-11T09:50:53,194 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 2024-12-11T09:50:53,194 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 2024-12-11T09:50:53,194 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for e0a9bcfe176438ff327dee8ed53f557b: Waiting for close lock at 1733910653194Disabling compacts and flushes for region at 1733910653194Disabling writes for close at 1733910653194Writing region close event to WAL at 1733910653194Closed at 1733910653194 2024-12-11T09:50:53,197 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T09:50:53,201 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733910653197"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733910653197"}]},"ts":"1733910653197"} 2024-12-11T09:50:53,205 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-11T09:50:53,207 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T09:50:53,210 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733910653208"}]},"ts":"1733910653208"} 2024-12-11T09:50:53,214 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-11T09:50:53,215 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {3e788d7781dc=0} racks are {/default-rack=0} 2024-12-11T09:50:53,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T09:50:53,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T09:50:53,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T09:50:53,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T09:50:53,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T09:50:53,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T09:50:53,216 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T09:50:53,216 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T09:50:53,216 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T09:50:53,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T09:50:53,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e0a9bcfe176438ff327dee8ed53f557b, ASSIGN}] 2024-12-11T09:50:53,220 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e0a9bcfe176438ff327dee8ed53f557b, ASSIGN 2024-12-11T09:50:53,222 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e0a9bcfe176438ff327dee8ed53f557b, ASSIGN; state=OFFLINE, location=3e788d7781dc,38469,1733910650808; forceNewPlan=false, retain=false 2024-12-11T09:50:53,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T09:50:53,375 INFO [3e788d7781dc:40323 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
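The balancer sees one host and one rack holding three region servers with zero regions each, and the ASSIGN subprocedure (pid=5) is dispatched; a few entries later the region comes OPEN on 3e788d7781dc,38469. From a client, the resulting placement can be inspected with RegionLocator, as in this minimal sketch (illustrative only, not part of the test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowRegionPlacement {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
          // Prints each region's encoded name and the region server currently hosting it.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }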
2024-12-11T09:50:53,376 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e0a9bcfe176438ff327dee8ed53f557b, regionState=OPENING, regionLocation=3e788d7781dc,38469,1733910650808 2024-12-11T09:50:53,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e0a9bcfe176438ff327dee8ed53f557b, ASSIGN because future has completed 2024-12-11T09:50:53,381 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e0a9bcfe176438ff327dee8ed53f557b, server=3e788d7781dc,38469,1733910650808}] 2024-12-11T09:50:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T09:50:53,537 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T09:50:53,541 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53237, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T09:50:53,552 INFO [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 2024-12-11T09:50:53,552 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e0a9bcfe176438ff327dee8ed53f557b, NAME => 'TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b.', STARTKEY => '', ENDKEY => ''} 2024-12-11T09:50:53,552 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC e0a9bcfe176438ff327dee8ed53f557b 2024-12-11T09:50:53,552 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T09:50:53,552 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e0a9bcfe176438ff327dee8ed53f557b 2024-12-11T09:50:53,553 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e0a9bcfe176438ff327dee8ed53f557b 2024-12-11T09:50:53,555 INFO [StoreOpener-e0a9bcfe176438ff327dee8ed53f557b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e0a9bcfe176438ff327dee8ed53f557b 2024-12-11T09:50:53,557 INFO [StoreOpener-e0a9bcfe176438ff327dee8ed53f557b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e0a9bcfe176438ff327dee8ed53f557b columnFamilyName cf 2024-12-11T09:50:53,557 DEBUG [StoreOpener-e0a9bcfe176438ff327dee8ed53f557b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:53,558 INFO [StoreOpener-e0a9bcfe176438ff327dee8ed53f557b-1 {}] regionserver.HStore(327): Store=e0a9bcfe176438ff327dee8ed53f557b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T09:50:53,558 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e0a9bcfe176438ff327dee8ed53f557b 2024-12-11T09:50:53,559 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/default/TestHBaseWalOnEC/e0a9bcfe176438ff327dee8ed53f557b 2024-12-11T09:50:53,560 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/default/TestHBaseWalOnEC/e0a9bcfe176438ff327dee8ed53f557b 2024-12-11T09:50:53,560 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e0a9bcfe176438ff327dee8ed53f557b 2024-12-11T09:50:53,560 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e0a9bcfe176438ff327dee8ed53f557b 2024-12-11T09:50:53,563 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e0a9bcfe176438ff327dee8ed53f557b 2024-12-11T09:50:53,568 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/default/TestHBaseWalOnEC/e0a9bcfe176438ff327dee8ed53f557b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T09:50:53,569 INFO [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e0a9bcfe176438ff327dee8ed53f557b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65407781, jitterRate=-0.025348111987113953}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T09:50:53,569 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e0a9bcfe176438ff327dee8ed53f557b 2024-12-11T09:50:53,569 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e0a9bcfe176438ff327dee8ed53f557b: Running coprocessor pre-open hook at 1733910653553Writing region info on filesystem at 1733910653553Initializing all the Stores at 1733910653554 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733910653554Cleaning up temporary data from old regions at 1733910653560 (+6 ms)Running coprocessor post-open hooks at 1733910653569 (+9 ms)Region opened successfully at 1733910653569 2024-12-11T09:50:53,571 INFO [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b., pid=6, masterSystemTime=1733910653537 2024-12-11T09:50:53,575 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 2024-12-11T09:50:53,575 INFO [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 2024-12-11T09:50:53,576 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e0a9bcfe176438ff327dee8ed53f557b, regionState=OPEN, openSeqNum=2, regionLocation=3e788d7781dc,38469,1733910650808 2024-12-11T09:50:53,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e0a9bcfe176438ff327dee8ed53f557b, server=3e788d7781dc,38469,1733910650808 because future has completed 2024-12-11T09:50:53,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T09:50:53,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e0a9bcfe176438ff327dee8ed53f557b, server=3e788d7781dc,38469,1733910650808 in 200 msec 2024-12-11T09:50:53,588 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T09:50:53,588 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e0a9bcfe176438ff327dee8ed53f557b, ASSIGN in 367 msec 2024-12-11T09:50:53,590 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T09:50:53,590 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733910653590"}]},"ts":"1733910653590"} 2024-12-11T09:50:53,593 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-11T09:50:53,595 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): 
pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T09:50:53,598 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 457 msec 2024-12-11T09:50:53,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T09:50:53,786 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T09:50:53,786 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-11T09:50:53,788 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T09:50:53,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-11T09:50:53,795 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T09:50:53,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-11T09:50:53,804 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b., hostname=3e788d7781dc,38469,1733910650808, seqNum=2] 2024-12-11T09:50:53,805 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T09:50:53,807 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42368, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T09:50:53,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40323 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-11T09:50:53,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40323 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-11T09:50:53,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T09:50:53,823 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-11T09:50:53,825 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T09:50:53,827 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T09:50:53,935 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T09:50:53,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38469 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-11T09:50:53,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 2024-12-11T09:50:53,992 INFO [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing e0a9bcfe176438ff327dee8ed53f557b 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-11T09:50:54,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/default/TestHBaseWalOnEC/e0a9bcfe176438ff327dee8ed53f557b/.tmp/cf/1cd1287ebe414068b8b4785e2a31a1d6 is 36, key is row/cf:cq/1733910653808/Put/seqid=0 2024-12-11T09:50:54,046 WARN [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:54,046 WARN [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:54,050 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_476418214_22 at /127.0.0.1:41564 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:46813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41564 dst: /127.0.0.1:46813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:54,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46813 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-11T09:50:54,055 WARN [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T09:50:54,056 INFO [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/default/TestHBaseWalOnEC/e0a9bcfe176438ff327dee8ed53f557b/.tmp/cf/1cd1287ebe414068b8b4785e2a31a1d6 2024-12-11T09:50:54,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/default/TestHBaseWalOnEC/e0a9bcfe176438ff327dee8ed53f557b/.tmp/cf/1cd1287ebe414068b8b4785e2a31a1d6 as hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/default/TestHBaseWalOnEC/e0a9bcfe176438ff327dee8ed53f557b/cf/1cd1287ebe414068b8b4785e2a31a1d6 2024-12-11T09:50:54,118 INFO [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/default/TestHBaseWalOnEC/e0a9bcfe176438ff327dee8ed53f557b/cf/1cd1287ebe414068b8b4785e2a31a1d6, entries=1, sequenceid=5, filesize=4.7 K 2024-12-11T09:50:54,125 INFO [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for e0a9bcfe176438ff327dee8ed53f557b in 132ms, sequenceid=5, compaction requested=false 2024-12-11T09:50:54,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-11T09:50:54,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for e0a9bcfe176438ff327dee8ed53f557b: 2024-12-11T09:50:54,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 
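The FlushTableProcedure (pid=7) requested by the client flushed the single cell written at row/cf:cq (32 B of memstore) into a ~4.7 K HFile under .tmp/cf and committed it, hitting the same parity-allocation warnings along the way. The client-side equivalent of "write one cell, then force a flush" is roughly the sketch below; only the row key 'row', family 'cf' and qualifier 'cq' come from the log, the value is an arbitrary placeholder.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAndFlush {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          // One cell at row/cf:cq, as seen in the flush above.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Force the memstore out to an HFile; on the master this shows up as a FlushTableProcedure.
          admin.flush(name);
        }
      }
    }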
2024-12-11T09:50:54,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-11T09:50:54,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40323 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-11T09:50:54,136 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-11T09:50:54,137 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 305 msec 2024-12-11T09:50:54,141 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 321 msec 2024-12-11T09:50:54,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T09:50:54,144 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T09:50:54,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-11T09:50:54,158 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T09:50:54,158 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T09:50:54,162 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:54,163 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:54,163 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T09:50:54,163 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-11T09:50:54,163 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=387613512, stopped=false 2024-12-11T09:50:54,163 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3e788d7781dc,40323,1733910649947 2024-12-11T09:50:54,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:54,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:54,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:54,209 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:54,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:54,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, 
quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:54,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:54,210 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:54,210 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T09:50:54,211 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T09:50:54,212 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T09:50:54,212 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:54,212 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:54,212 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:54,212 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:54,212 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:54,214 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3e788d7781dc,35681,1733910650669' ***** 2024-12-11T09:50:54,214 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T09:50:54,214 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3e788d7781dc,44945,1733910650767' ***** 2024-12-11T09:50:54,214 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T09:50:54,214 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3e788d7781dc,38469,1733910650808' ***** 2024-12-11T09:50:54,215 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T09:50:54,215 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T09:50:54,215 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T09:50:54,215 INFO [RS:0;3e788d7781dc:35681 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T09:50:54,215 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T09:50:54,215 INFO [RS:0;3e788d7781dc:35681 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
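The AsyncConnectionImpl call stacks above all point at the same origin: HBaseTestingUtil.shutdownMiniCluster() invoked from the test's tearDown (TestHBaseWalOnEC.java:101), which closes the shared connection and then asks the master and the region servers to stop. The JUnit lifecycle that produces this sequence has roughly the following shape; this is a sketch of conventional HBaseTestingUtil usage, not the actual test source, and the startMiniCluster(3) overload is an assumption based on the three region servers visible in this log.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycle {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        UTIL.startMiniCluster(3);   // three region servers, matching the log above (assumed overload)
      }

      @AfterClass
      public static void tearDown() throws Exception {
        UTIL.shutdownMiniCluster(); // produces the "Shutting down minicluster" sequence seen here
      }
    }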
2024-12-11T09:50:54,215 INFO [RS:2;3e788d7781dc:38469 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T09:50:54,215 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T09:50:54,215 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T09:50:54,215 INFO [RS:2;3e788d7781dc:38469 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T09:50:54,215 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(959): stopping server 3e788d7781dc,35681,1733910650669 2024-12-11T09:50:54,215 INFO [RS:1;3e788d7781dc:44945 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T09:50:54,215 INFO [RS:0;3e788d7781dc:35681 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T09:50:54,215 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T09:50:54,216 INFO [RS:1;3e788d7781dc:44945 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T09:50:54,216 INFO [RS:0;3e788d7781dc:35681 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3e788d7781dc:35681. 2024-12-11T09:50:54,216 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(3091): Received CLOSE for e0a9bcfe176438ff327dee8ed53f557b 2024-12-11T09:50:54,216 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.HRegionServer(959): stopping server 3e788d7781dc,44945,1733910650767 2024-12-11T09:50:54,216 INFO [RS:1;3e788d7781dc:44945 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T09:50:54,216 DEBUG [RS:0;3e788d7781dc:35681 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T09:50:54,216 DEBUG [RS:0;3e788d7781dc:35681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:54,216 INFO [RS:1;3e788d7781dc:44945 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3e788d7781dc:44945. 
2024-12-11T09:50:54,216 DEBUG [RS:1;3e788d7781dc:44945 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T09:50:54,216 DEBUG [RS:1;3e788d7781dc:44945 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:54,216 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T09:50:54,216 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T09:50:54,216 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T09:50:54,216 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.HRegionServer(976): stopping server 3e788d7781dc,44945,1733910650767; all regions closed. 2024-12-11T09:50:54,216 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-11T09:50:54,216 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(959): stopping server 3e788d7781dc,38469,1733910650808 2024-12-11T09:50:54,217 INFO [RS:2;3e788d7781dc:38469 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T09:50:54,217 INFO [RS:2;3e788d7781dc:38469 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;3e788d7781dc:38469. 
2024-12-11T09:50:54,217 DEBUG [RS:2;3e788d7781dc:38469 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T09:50:54,217 DEBUG [RS:2;3e788d7781dc:38469 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:54,217 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T09:50:54,217 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T09:50:54,217 DEBUG [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(1325): Online Regions={e0a9bcfe176438ff327dee8ed53f557b=TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b.} 2024-12-11T09:50:54,217 DEBUG [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-11T09:50:54,217 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e0a9bcfe176438ff327dee8ed53f557b, disabling compactions & flushes 2024-12-11T09:50:54,217 INFO [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 2024-12-11T09:50:54,217 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 2024-12-11T09:50:54,218 DEBUG [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T09:50:54,218 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. after waiting 0 ms 2024-12-11T09:50:54,218 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 
2024-12-11T09:50:54,218 DEBUG [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(1351): Waiting on e0a9bcfe176438ff327dee8ed53f557b 2024-12-11T09:50:54,217 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T09:50:54,218 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T09:50:54,218 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T09:50:54,218 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T09:50:54,218 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T09:50:54,218 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-11T09:50:54,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41951 is added to blk_1073741826_1016 (size=93) 2024-12-11T09:50:54,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46813 is added to blk_1073741826_1016 (size=93) 2024-12-11T09:50:54,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44455 is added to blk_1073741826_1016 (size=93) 2024-12-11T09:50:54,230 DEBUG [RS:1;3e788d7781dc:44945 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/oldWALs 2024-12-11T09:50:54,230 INFO [RS:1;3e788d7781dc:44945 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3e788d7781dc%2C44945%2C1733910650767:(num 1733910652249) 2024-12-11T09:50:54,230 DEBUG [RS:1;3e788d7781dc:44945 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:54,230 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T09:50:54,230 INFO [RS:1;3e788d7781dc:44945 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T09:50:54,230 INFO [RS:1;3e788d7781dc:44945 {}] hbase.ChoreService(370): Chore service for: regionserver/3e788d7781dc:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-11T09:50:54,231 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T09:50:54,231 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T09:50:54,231 INFO [regionserver/3e788d7781dc:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T09:50:54,231 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-11T09:50:54,231 INFO [RS:1;3e788d7781dc:44945 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T09:50:54,231 INFO [RS:1;3e788d7781dc:44945 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44945 2024-12-11T09:50:54,236 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/default/TestHBaseWalOnEC/e0a9bcfe176438ff327dee8ed53f557b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T09:50:54,238 INFO [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 2024-12-11T09:50:54,238 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e0a9bcfe176438ff327dee8ed53f557b: Waiting for close lock at 1733910654217Running coprocessor pre-close hooks at 1733910654217Disabling compacts and flushes for region at 1733910654217Disabling writes for close at 1733910654218 (+1 ms)Writing region close event to WAL at 1733910654218Running coprocessor post-close hooks at 1733910654237 (+19 ms)Closed at 1733910654238 (+1 ms) 2024-12-11T09:50:54,239 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b. 2024-12-11T09:50:54,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T09:50:54,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3e788d7781dc,44945,1733910650767 2024-12-11T09:50:54,242 INFO [RS:1;3e788d7781dc:44945 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T09:50:54,243 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3e788d7781dc,44945,1733910650767] 2024-12-11T09:50:54,251 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/.tmp/info/78a41cb1ab2b46349e639e156eddd558 is 153, key is TestHBaseWalOnEC,,1733910653131.e0a9bcfe176438ff327dee8ed53f557b./info:regioninfo/1733910653575/Put/seqid=0 2024-12-11T09:50:54,254 WARN [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:54,254 WARN [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:54,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_626028000_22 at /127.0.0.1:41580 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:46813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41580 dst: /127.0.0.1:46813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:54,259 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3e788d7781dc,44945,1733910650767 already deleted, retry=false 2024-12-11T09:50:54,259 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3e788d7781dc,44945,1733910650767 expired; onlineServers=2 2024-12-11T09:50:54,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46813 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-11T09:50:54,263 WARN [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
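The RS-3-2-1024k parity warnings recur for every striped write in this run, here for the store flushes of hbase:meta's info, ns and table families, because the test data directory sits under the erasure coding policy. For reference, enabling and applying such a policy to a directory from a client looks roughly like this; how TestHBaseWalOnEC actually configures the policy is not shown in this log, and the target path below is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class EnableEcOnDirectory {
      public static void main(String[] args) throws Exception {
        try (FileSystem fs = FileSystem.get(new Configuration())) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;  // assumes fs.defaultFS is an HDFS URI
          dfs.enableErasureCodingPolicy("RS-3-2-1024k");           // make the built-in policy usable cluster-wide
          Path dir = new Path("/ec-test-data");                    // hypothetical directory, not the path in this log
          dfs.mkdirs(dir);
          dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");         // new files created below inherit the policy
          System.out.println("Policy on " + dir + ": " + dfs.getErasureCodingPolicy(dir).getName());
        }
      }
    }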
2024-12-11T09:50:54,263 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/.tmp/info/78a41cb1ab2b46349e639e156eddd558 2024-12-11T09:50:54,287 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/.tmp/ns/df8e00a8f52f46fab12e060452d7281c is 43, key is default/ns:d/1733910652936/Put/seqid=0 2024-12-11T09:50:54,290 INFO [regionserver/3e788d7781dc:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T09:50:54,290 INFO [regionserver/3e788d7781dc:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T09:50:54,290 WARN [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:54,290 WARN [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:54,291 INFO [regionserver/3e788d7781dc:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T09:50:54,294 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_626028000_22 at /127.0.0.1:33356 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:44455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33356 dst: /127.0.0.1:44455 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T09:50:54,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44455 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-11T09:50:54,298 WARN [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T09:50:54,298 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/.tmp/ns/df8e00a8f52f46fab12e060452d7281c 2024-12-11T09:50:54,322 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/.tmp/table/8d489192a39e493fb5ebc5e88c89ebc9 is 52, key is TestHBaseWalOnEC/table:state/1733910653590/Put/seqid=0 2024-12-11T09:50:54,325 WARN [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:54,325 WARN [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:54,328 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_626028000_22 at /127.0.0.1:54826 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:41951:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54826 dst: /127.0.0.1:41951 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:54,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41951 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-11T09:50:54,333 WARN [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T09:50:54,333 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/.tmp/table/8d489192a39e493fb5ebc5e88c89ebc9 2024-12-11T09:50:54,342 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/.tmp/info/78a41cb1ab2b46349e639e156eddd558 as hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/info/78a41cb1ab2b46349e639e156eddd558 2024-12-11T09:50:54,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:54,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44945-0x100147ff5e00002, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:54,352 INFO [RS:1;3e788d7781dc:44945 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T09:50:54,352 INFO [RS:1;3e788d7781dc:44945 {}] regionserver.HRegionServer(1031): Exiting; stopping=3e788d7781dc,44945,1733910650767; zookeeper connection closed. 
2024-12-11T09:50:54,352 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@334bb9f6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@334bb9f6 2024-12-11T09:50:54,352 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/info/78a41cb1ab2b46349e639e156eddd558, entries=10, sequenceid=11, filesize=6.5 K 2024-12-11T09:50:54,354 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/.tmp/ns/df8e00a8f52f46fab12e060452d7281c as hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/ns/df8e00a8f52f46fab12e060452d7281c 2024-12-11T09:50:54,363 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/ns/df8e00a8f52f46fab12e060452d7281c, entries=2, sequenceid=11, filesize=5.0 K 2024-12-11T09:50:54,364 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/.tmp/table/8d489192a39e493fb5ebc5e88c89ebc9 as hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/table/8d489192a39e493fb5ebc5e88c89ebc9 2024-12-11T09:50:54,374 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/table/8d489192a39e493fb5ebc5e88c89ebc9, entries=2, sequenceid=11, filesize=5.1 K 2024-12-11T09:50:54,375 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 157ms, sequenceid=11, compaction requested=false 2024-12-11T09:50:54,375 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-11T09:50:54,383 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-11T09:50:54,384 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T09:50:54,384 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T09:50:54,384 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 
1733910654217Running coprocessor pre-close hooks at 1733910654217Disabling compacts and flushes for region at 1733910654217Disabling writes for close at 1733910654218 (+1 ms)Obtaining lock to block concurrent updates at 1733910654218Preparing flush snapshotting stores in 1588230740 at 1733910654218Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733910654219 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733910654220 (+1 ms)Flushing 1588230740/info: creating writer at 1733910654221 (+1 ms)Flushing 1588230740/info: appending metadata at 1733910654249 (+28 ms)Flushing 1588230740/info: closing flushed file at 1733910654249Flushing 1588230740/ns: creating writer at 1733910654272 (+23 ms)Flushing 1588230740/ns: appending metadata at 1733910654286 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733910654286Flushing 1588230740/table: creating writer at 1733910654307 (+21 ms)Flushing 1588230740/table: appending metadata at 1733910654321 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733910654321Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27ce40bb: reopening flushed file at 1733910654341 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20acaf38: reopening flushed file at 1733910654353 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54376b5a: reopening flushed file at 1733910654363 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 157ms, sequenceid=11, compaction requested=false at 1733910654375 (+12 ms)Writing region close event to WAL at 1733910654377 (+2 ms)Running coprocessor post-close hooks at 1733910654384 (+7 ms)Closed at 1733910654384 2024-12-11T09:50:54,384 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-11T09:50:54,418 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(976): stopping server 3e788d7781dc,35681,1733910650669; all regions closed. 2024-12-11T09:50:54,418 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(976): stopping server 3e788d7781dc,38469,1733910650808; all regions closed. 
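The flush recorded above stages each new HFile under the region's .tmp directory and only afterwards commits it into the column family directory (the HRegionFileSystem "Committing ... as ..." lines), so an incomplete file is never visible in the store. On HDFS that commit is a rename within the same filesystem. The following is a minimal sketch of such a commit step, not HBase's actual HRegionFileSystem code, using hypothetical paths shaped like the .tmp/info file and info/ directory in the log above.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitFlushedFile {
      // Move a fully written flush file from the region's .tmp area into the store directory.
      static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
        Path dst = new Path(storeDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dst)) {
          throw new IOException("Failed to commit " + tmpFile + " to " + dst);
        }
        return dst;
      }

      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Hypothetical paths modeled on the meta region flush shown in this log.
        Path tmp = new Path("/data/hbase/meta/1588230740/.tmp/info/78a41cb1ab2b46349e639e156eddd558");
        Path storeDir = new Path("/data/hbase/meta/1588230740/info");
        System.out.println("Committed to " + commit(fs, tmp, storeDir));
      }
    }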
2024-12-11T09:50:54,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41951 is added to blk_1073741829_1019 (size=2751) 2024-12-11T09:50:54,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46813 is added to blk_1073741828_1018 (size=1298) 2024-12-11T09:50:54,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46813 is added to blk_1073741829_1019 (size=2751) 2024-12-11T09:50:54,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44455 is added to blk_1073741828_1018 (size=1298) 2024-12-11T09:50:54,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44455 is added to blk_1073741829_1019 (size=2751) 2024-12-11T09:50:54,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41951 is added to blk_1073741828_1018 (size=1298) 2024-12-11T09:50:54,429 DEBUG [RS:2;3e788d7781dc:38469 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/oldWALs 2024-12-11T09:50:54,430 INFO [RS:2;3e788d7781dc:38469 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3e788d7781dc%2C38469%2C1733910650808:(num 1733910652249) 2024-12-11T09:50:54,430 DEBUG [RS:2;3e788d7781dc:38469 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:54,430 DEBUG [RS:0;3e788d7781dc:35681 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/oldWALs 2024-12-11T09:50:54,430 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T09:50:54,430 INFO [RS:0;3e788d7781dc:35681 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3e788d7781dc%2C35681%2C1733910650669.meta:.meta(num 1733910652732) 2024-12-11T09:50:54,430 INFO [RS:2;3e788d7781dc:38469 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T09:50:54,430 INFO [RS:2;3e788d7781dc:38469 {}] hbase.ChoreService(370): Chore service for: regionserver/3e788d7781dc:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T09:50:54,430 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T09:50:54,430 INFO [regionserver/3e788d7781dc:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T09:50:54,430 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T09:50:54,430 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-11T09:50:54,430 INFO [RS:2;3e788d7781dc:38469 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T09:50:54,430 INFO [RS:2;3e788d7781dc:38469 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38469 2024-12-11T09:50:54,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41951 is added to blk_1073741827_1017 (size=93) 2024-12-11T09:50:54,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44455 is added to blk_1073741827_1017 (size=93) 2024-12-11T09:50:54,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46813 is added to blk_1073741827_1017 (size=93) 2024-12-11T09:50:54,436 DEBUG [RS:0;3e788d7781dc:35681 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/oldWALs 2024-12-11T09:50:54,436 INFO [RS:0;3e788d7781dc:35681 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3e788d7781dc%2C35681%2C1733910650669:(num 1733910652249) 2024-12-11T09:50:54,436 DEBUG [RS:0;3e788d7781dc:35681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:54,436 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T09:50:54,436 INFO [RS:0;3e788d7781dc:35681 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T09:50:54,436 INFO [RS:0;3e788d7781dc:35681 {}] hbase.ChoreService(370): Chore service for: regionserver/3e788d7781dc:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T09:50:54,436 INFO [RS:0;3e788d7781dc:35681 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T09:50:54,436 INFO [regionserver/3e788d7781dc:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T09:50:54,437 INFO [RS:0;3e788d7781dc:35681 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35681 2024-12-11T09:50:54,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T09:50:54,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3e788d7781dc,38469,1733910650808 2024-12-11T09:50:54,441 INFO [RS:2;3e788d7781dc:38469 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T09:50:54,449 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3e788d7781dc,35681,1733910650669 2024-12-11T09:50:54,449 INFO [RS:0;3e788d7781dc:35681 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T09:50:54,457 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3e788d7781dc,35681,1733910650669] 2024-12-11T09:50:54,474 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3e788d7781dc,35681,1733910650669 already deleted, retry=false 2024-12-11T09:50:54,474 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3e788d7781dc,35681,1733910650669 expired; onlineServers=1 2024-12-11T09:50:54,474 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3e788d7781dc,38469,1733910650808] 2024-12-11T09:50:54,484 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3e788d7781dc,38469,1733910650808 already deleted, retry=false 2024-12-11T09:50:54,484 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3e788d7781dc,38469,1733910650808 expired; onlineServers=0 2024-12-11T09:50:54,485 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3e788d7781dc,40323,1733910649947' ***** 2024-12-11T09:50:54,485 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T09:50:54,485 INFO [M:0;3e788d7781dc:40323 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T09:50:54,485 INFO [M:0;3e788d7781dc:40323 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T09:50:54,485 DEBUG [M:0;3e788d7781dc:40323 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-11T09:50:54,486 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
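The expiration sequence above is driven by ZooKeeper ephemeral nodes: each region server holds an ephemeral znode under /hbase/rs, so when its session closes the znode disappears, the master's watcher receives a NodeDeleted event for that path, and RegionServerTracker processes the server as expired. A minimal stand-alone sketch of watching such a znode with the plain ZooKeeper client follows; the quorum address matches the one in this log, the znode path is taken from the deleted node above purely as an example, and this is an illustration of the mechanism rather than the HBase tracker itself.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsExpirationWatch {
      public static void main(String[] args) throws Exception {
        String znode = "/hbase/rs/3e788d7781dc,38469,1733910650808"; // example ephemeral node
        CountDownLatch deleted = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60518", 30000, event -> { });
        Watcher watcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
            System.out.println("Ephemeral node deleted, would expire " + event.getPath());
            deleted.countDown();
          }
        };
        // exists() registers the watch whether or not the node currently exists.
        // Note: ZooKeeper watches are one-shot; a real tracker re-registers after each event.
        zk.exists(znode, watcher);
        deleted.await();
        zk.close();
      }
    }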
2024-12-11T09:50:54,486 DEBUG [M:0;3e788d7781dc:40323 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T09:50:54,486 DEBUG [master/3e788d7781dc:0:becomeActiveMaster-HFileCleaner.small.0-1733910651879 {}] cleaner.HFileCleaner(306): Exit Thread[master/3e788d7781dc:0:becomeActiveMaster-HFileCleaner.small.0-1733910651879,5,FailOnTimeoutGroup] 2024-12-11T09:50:54,486 DEBUG [master/3e788d7781dc:0:becomeActiveMaster-HFileCleaner.large.0-1733910651878 {}] cleaner.HFileCleaner(306): Exit Thread[master/3e788d7781dc:0:becomeActiveMaster-HFileCleaner.large.0-1733910651878,5,FailOnTimeoutGroup] 2024-12-11T09:50:54,487 INFO [M:0;3e788d7781dc:40323 {}] hbase.ChoreService(370): Chore service for: master/3e788d7781dc:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-11T09:50:54,487 INFO [M:0;3e788d7781dc:40323 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T09:50:54,487 DEBUG [M:0;3e788d7781dc:40323 {}] master.HMaster(1795): Stopping service threads 2024-12-11T09:50:54,487 INFO [M:0;3e788d7781dc:40323 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T09:50:54,488 INFO [M:0;3e788d7781dc:40323 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T09:50:54,489 INFO [M:0;3e788d7781dc:40323 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T09:50:54,489 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-11T09:50:54,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T09:50:54,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:54,493 DEBUG [M:0;3e788d7781dc:40323 {}] zookeeper.ZKUtil(347): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-11T09:50:54,493 WARN [M:0;3e788d7781dc:40323 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-11T09:50:54,495 INFO [M:0;3e788d7781dc:40323 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/.lastflushedseqids 2024-12-11T09:50:54,504 WARN [M:0;3e788d7781dc:40323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:54,504 WARN [M:0;3e788d7781dc:40323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-11T09:50:54,506 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-366492811_22 at /127.0.0.1:33370 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:44455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33370 dst: /127.0.0.1:44455 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:54,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44455 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-11T09:50:54,510 WARN [M:0;3e788d7781dc:40323 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T09:50:54,511 INFO [M:0;3e788d7781dc:40323 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-11T09:50:54,511 INFO [M:0;3e788d7781dc:40323 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-11T09:50:54,511 DEBUG [M:0;3e788d7781dc:40323 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T09:50:54,511 INFO [M:0;3e788d7781dc:40323 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T09:50:54,511 DEBUG [M:0;3e788d7781dc:40323 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T09:50:54,511 DEBUG [M:0;3e788d7781dc:40323 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T09:50:54,511 DEBUG [M:0;3e788d7781dc:40323 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-11T09:50:54,512 INFO [M:0;3e788d7781dc:40323 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-11T09:50:54,531 DEBUG [M:0;3e788d7781dc:40323 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e5c5dcbc7d90416ba55b73bfbb8a7abd is 82, key is hbase:meta,,1/info:regioninfo/1733910652814/Put/seqid=0 2024-12-11T09:50:54,533 WARN [M:0;3e788d7781dc:40323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:54,533 WARN [M:0;3e788d7781dc:40323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:54,536 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-366492811_22 at /127.0.0.1:41602 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:46813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41602 dst: /127.0.0.1:46813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:54,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46813 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-11T09:50:54,542 WARN [M:0;3e788d7781dc:40323 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T09:50:54,542 INFO [M:0;3e788d7781dc:40323 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e5c5dcbc7d90416ba55b73bfbb8a7abd 2024-12-11T09:50:54,558 INFO [RS:2;3e788d7781dc:38469 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T09:50:54,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:54,558 INFO [RS:2;3e788d7781dc:38469 {}] regionserver.HRegionServer(1031): Exiting; stopping=3e788d7781dc,38469,1733910650808; zookeeper connection closed. 2024-12-11T09:50:54,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38469-0x100147ff5e00003, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:54,558 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@439af143 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@439af143 2024-12-11T09:50:54,566 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:54,566 INFO [RS:0;3e788d7781dc:35681 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T09:50:54,566 INFO [RS:0;3e788d7781dc:35681 {}] regionserver.HRegionServer(1031): Exiting; stopping=3e788d7781dc,35681,1733910650669; zookeeper connection closed. 2024-12-11T09:50:54,566 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35681-0x100147ff5e00001, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:54,566 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@f5e81a5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@f5e81a5 2024-12-11T09:50:54,567 DEBUG [M:0;3e788d7781dc:40323 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8db766d5524f47efb8512b6e57649771 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733910653597/Put/seqid=0 2024-12-11T09:50:54,567 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-11T09:50:54,568 WARN [M:0;3e788d7781dc:40323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:54,569 WARN [M:0;3e788d7781dc:40323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-11T09:50:54,571 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-366492811_22 at /127.0.0.1:41624 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:46813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41624 dst: /127.0.0.1:46813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:54,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46813 is added to blk_-9223372036854775552_1037 (size=6438) 2024-12-11T09:50:54,576 WARN [M:0;3e788d7781dc:40323 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T09:50:54,576 INFO [M:0;3e788d7781dc:40323 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8db766d5524f47efb8512b6e57649771 2024-12-11T09:50:54,597 DEBUG [M:0;3e788d7781dc:40323 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3671dcb2071b47749dc1437d7ffb4fbf is 69, key is 3e788d7781dc,35681,1733910650669/rs:state/1733910651973/Put/seqid=0 2024-12-11T09:50:54,599 WARN [M:0;3e788d7781dc:40323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T09:50:54,599 WARN [M:0;3e788d7781dc:40323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-11T09:50:54,602 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-366492811_22 at /127.0.0.1:33400 [Receiving block BP-1086906460-172.17.0.2-1733910646032:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:44455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33400 dst: /127.0.0.1:44455 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T09:50:54,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44455 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-11T09:50:54,607 WARN [M:0;3e788d7781dc:40323 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T09:50:54,607 INFO [M:0;3e788d7781dc:40323 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3671dcb2071b47749dc1437d7ffb4fbf 2024-12-11T09:50:54,616 DEBUG [M:0;3e788d7781dc:40323 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e5c5dcbc7d90416ba55b73bfbb8a7abd as hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e5c5dcbc7d90416ba55b73bfbb8a7abd 2024-12-11T09:50:54,624 INFO [M:0;3e788d7781dc:40323 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e5c5dcbc7d90416ba55b73bfbb8a7abd, entries=8, sequenceid=72, filesize=5.5 K 2024-12-11T09:50:54,626 DEBUG [M:0;3e788d7781dc:40323 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8db766d5524f47efb8512b6e57649771 as hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8db766d5524f47efb8512b6e57649771 2024-12-11T09:50:54,633 INFO [M:0;3e788d7781dc:40323 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8db766d5524f47efb8512b6e57649771, entries=8, sequenceid=72, filesize=6.3 K 2024-12-11T09:50:54,635 DEBUG [M:0;3e788d7781dc:40323 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3671dcb2071b47749dc1437d7ffb4fbf as hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3671dcb2071b47749dc1437d7ffb4fbf 2024-12-11T09:50:54,642 INFO [M:0;3e788d7781dc:40323 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3671dcb2071b47749dc1437d7ffb4fbf, entries=3, sequenceid=72, filesize=5.2 K 2024-12-11T09:50:54,643 INFO [M:0;3e788d7781dc:40323 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=72, compaction requested=false 2024-12-11T09:50:54,644 INFO [M:0;3e788d7781dc:40323 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-11T09:50:54,644 DEBUG [M:0;3e788d7781dc:40323 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733910654511Disabling compacts and flushes for region at 1733910654511Disabling writes for close at 1733910654511Obtaining lock to block concurrent updates at 1733910654512 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733910654512Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733910654512Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733910654513 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733910654513Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733910654530 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733910654530Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733910654550 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733910654566 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733910654566Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733910654582 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733910654597 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733910654597Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fa550fe: reopening flushed file at 1733910654615 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54c6445c: reopening flushed file at 1733910654625 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3316a97: reopening flushed file at 1733910654634 (+9 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=72, compaction requested=false at 1733910654643 (+9 ms)Writing region close event to WAL at 1733910654644 (+1 ms)Closed at 1733910654644 2024-12-11T09:50:54,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44455 is added to blk_1073741825_1011 (size=32662) 2024-12-11T09:50:54,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46813 is added to blk_1073741825_1011 (size=32662) 2024-12-11T09:50:54,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41951 is added to blk_1073741825_1011 (size=32662) 2024-12-11T09:50:54,648 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T09:50:54,648 INFO [M:0;3e788d7781dc:40323 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
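The region close journal above is one concatenated string of steps, each stamped with an absolute epoch-millisecond timestamp and, where time elapsed, a "(+N ms)" delta from the previous step, which makes it easy to see which phases of the 132 ms flush took the time. A small convenience sketch that pulls the step names and deltas out of a journal string with a regular expression is shown below; it assumes the exact "<step> at <millis> (+N ms)" format seen in this log and uses a shortened sample string.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class CloseJournalDeltas {
      public static void main(String[] args) {
        // Shortened sample in the journal format from the log above.
        String journal = "Waiting for close lock at 1733910654511"
            + "Disabling compacts and flushes for region at 1733910654511"
            + "Obtaining lock to block concurrent updates at 1733910654512 (+1 ms)"
            + "Closed at 1733910654644";
        // Each step is "<description> at <13-digit epoch millis>", optionally "+N ms" in parentheses.
        Pattern step = Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");
        Matcher m = step.matcher(journal);
        while (m.find()) {
          String delta = m.group(3) == null ? "0" : m.group(3);
          System.out.printf("%-45s +%s ms%n", m.group(1), delta);
        }
      }
    }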
2024-12-11T09:50:54,648 INFO [M:0;3e788d7781dc:40323 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40323 2024-12-11T09:50:54,648 INFO [M:0;3e788d7781dc:40323 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T09:50:54,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:54,758 INFO [M:0;3e788d7781dc:40323 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T09:50:54,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40323-0x100147ff5e00000, quorum=127.0.0.1:60518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:54,805 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4fcce8ef{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T09:50:54,809 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a241def{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T09:50:54,809 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T09:50:54,809 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1646e48a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T09:50:54,809 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cf3d394{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/hadoop.log.dir/,STOPPED} 2024-12-11T09:50:54,812 WARN [BP-1086906460-172.17.0.2-1733910646032 heartbeating to localhost/127.0.0.1:34879 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T09:50:54,812 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T09:50:54,812 WARN [BP-1086906460-172.17.0.2-1733910646032 heartbeating to localhost/127.0.0.1:34879 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1086906460-172.17.0.2-1733910646032 (Datanode Uuid 0f6b940d-636d-414c-8460-b06df22cc5cc) service to localhost/127.0.0.1:34879 2024-12-11T09:50:54,812 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T09:50:54,813 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/data/data5/current/BP-1086906460-172.17.0.2-1733910646032 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T09:50:54,813 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/data/data6/current/BP-1086906460-172.17.0.2-1733910646032 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T09:50:54,814 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T09:50:54,816 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26ca39fe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T09:50:54,816 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ad305cc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T09:50:54,816 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T09:50:54,816 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56f2bf79{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T09:50:54,817 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f372e8e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/hadoop.log.dir/,STOPPED} 2024-12-11T09:50:54,818 WARN [BP-1086906460-172.17.0.2-1733910646032 heartbeating to localhost/127.0.0.1:34879 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T09:50:54,818 WARN [BP-1086906460-172.17.0.2-1733910646032 heartbeating to localhost/127.0.0.1:34879 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1086906460-172.17.0.2-1733910646032 (Datanode Uuid 8bd095ef-413b-445d-a8ff-fe1511ae1d57) service to localhost/127.0.0.1:34879 2024-12-11T09:50:54,819 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/data/data3/current/BP-1086906460-172.17.0.2-1733910646032 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T09:50:54,819 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/data/data4/current/BP-1086906460-172.17.0.2-1733910646032 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T09:50:54,819 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-11T09:50:54,819 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T09:50:54,819 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T09:50:54,822 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a2119f0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T09:50:54,822 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f8b16a4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T09:50:54,822 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T09:50:54,822 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a55babc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T09:50:54,822 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7acd62f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/hadoop.log.dir/,STOPPED} 2024-12-11T09:50:54,823 WARN [BP-1086906460-172.17.0.2-1733910646032 heartbeating to localhost/127.0.0.1:34879 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T09:50:54,823 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T09:50:54,824 WARN [BP-1086906460-172.17.0.2-1733910646032 heartbeating to localhost/127.0.0.1:34879 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1086906460-172.17.0.2-1733910646032 (Datanode Uuid ffd97ac0-20fd-4145-af29-8c16f89a6d6e) service to localhost/127.0.0.1:34879 2024-12-11T09:50:54,824 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T09:50:54,824 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/data/data1/current/BP-1086906460-172.17.0.2-1733910646032 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T09:50:54,824 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/cluster_dfc4bfcd-800b-06f9-90c7-85deae558f04/data/data2/current/BP-1086906460-172.17.0.2-1733910646032 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T09:50:54,824 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T09:50:54,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44270346{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T09:50:54,836 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11292817{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T09:50:54,836 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T09:50:54,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2566da3f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T09:50:54,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@346b353e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/hadoop.log.dir/,STOPPED} 2024-12-11T09:50:54,847 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-11T09:50:54,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-11T09:50:54,876 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=91 (was 162), OpenFileDescriptor=439 (was 393) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=248 (was 270), ProcessCount=11 (was 11), AvailableMemoryMB=8153 (was 8402) 2024-12-11T09:50:54,882 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=91, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=248, ProcessCount=11, AvailableMemoryMB=8153 2024-12-11T09:50:54,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-11T09:50:54,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/hadoop.log.dir so I do NOT create it in target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064 2024-12-11T09:50:54,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ba4e899-2673-5e61-3f0e-a78f432d161e/hadoop.tmp.dir so I do NOT create it in target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064 2024-12-11T09:50:54,882 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6, deleteOnExit=true 2024-12-11T09:50:54,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-11T09:50:54,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/test.cache.data in system properties and HBase conf 2024-12-11T09:50:54,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T09:50:54,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/hadoop.log.dir in system properties and HBase conf 2024-12-11T09:50:54,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T09:50:54,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T09:50:54,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T09:50:54,883 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-11T09:50:54,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T09:50:54,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T09:50:54,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T09:50:54,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T09:50:54,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T09:50:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T09:50:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T09:50:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T09:50:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T09:50:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/nfs.dump.dir in system properties and HBase conf 2024-12-11T09:50:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/java.io.tmpdir in system properties and HBase conf 2024-12-11T09:50:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T09:50:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T09:50:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T09:50:55,113 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T09:50:55,117 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T09:50:55,118 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T09:50:55,118 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T09:50:55,118 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T09:50:55,119 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T09:50:55,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ee3f473{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/hadoop.log.dir/,AVAILABLE} 2024-12-11T09:50:55,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5973d122{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T09:50:55,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@8948308{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/java.io.tmpdir/jetty-localhost-37867-hadoop-hdfs-3_4_1-tests_jar-_-any-8455155527150216418/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T09:50:55,210 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3504f92f{HTTP/1.1, (http/1.1)}{localhost:37867} 2024-12-11T09:50:55,210 INFO [Time-limited test {}] server.Server(415): Started @11120ms 2024-12-11T09:50:55,414 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T09:50:55,417 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T09:50:55,418 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T09:50:55,418 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T09:50:55,418 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T09:50:55,419 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25acd767{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/hadoop.log.dir/,AVAILABLE} 2024-12-11T09:50:55,419 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@796906a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T09:50:55,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f0ad577{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/java.io.tmpdir/jetty-localhost-45695-hadoop-hdfs-3_4_1-tests_jar-_-any-6552025614375239383/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T09:50:55,508 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@74f05853{HTTP/1.1, (http/1.1)}{localhost:45695} 2024-12-11T09:50:55,508 INFO [Time-limited test {}] server.Server(415): Started @11419ms 2024-12-11T09:50:55,509 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T09:50:55,538 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T09:50:55,541 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T09:50:55,543 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T09:50:55,543 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T09:50:55,543 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T09:50:55,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@136685e2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/hadoop.log.dir/,AVAILABLE} 2024-12-11T09:50:55,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@867097b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T09:50:55,635 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@51a3a305{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/java.io.tmpdir/jetty-localhost-45527-hadoop-hdfs-3_4_1-tests_jar-_-any-6249372856609922192/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T09:50:55,636 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@196fd311{HTTP/1.1, (http/1.1)}{localhost:45527} 2024-12-11T09:50:55,636 INFO [Time-limited test {}] server.Server(415): Started @11546ms 2024-12-11T09:50:55,637 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T09:50:55,668 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T09:50:55,671 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T09:50:55,671 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T09:50:55,672 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T09:50:55,672 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T09:50:55,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56fa1103{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/hadoop.log.dir/,AVAILABLE} 2024-12-11T09:50:55,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@287e3901{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T09:50:55,764 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52ded98a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/java.io.tmpdir/jetty-localhost-37937-hadoop-hdfs-3_4_1-tests_jar-_-any-7420187611281951617/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T09:50:55,764 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7bed397a{HTTP/1.1, (http/1.1)}{localhost:37937} 2024-12-11T09:50:55,764 INFO [Time-limited test {}] server.Server(415): Started @11675ms 2024-12-11T09:50:55,766 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T09:50:56,118 WARN [Thread-538 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/data/data1/current/BP-1694945727-172.17.0.2-1733910654907/current, will proceed with Du for space computation calculation, 2024-12-11T09:50:56,118 WARN [Thread-539 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/data/data2/current/BP-1694945727-172.17.0.2-1733910654907/current, will proceed with Du for space computation calculation, 2024-12-11T09:50:56,138 WARN [Thread-481 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T09:50:56,141 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92aa7cdfab724b2e with lease ID 0x8cebb4cf586b7336: Processing first storage report for DS-7cd13ef2-eb48-4ff5-8115-12ddc853d701 from datanode DatanodeRegistration(127.0.0.1:37501, datanodeUuid=24d472da-0c6b-4dba-adef-a5361d44a88b, infoPort=36479, infoSecurePort=0, ipcPort=40117, storageInfo=lv=-57;cid=testClusterID;nsid=1573474144;c=1733910654907) 2024-12-11T09:50:56,141 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92aa7cdfab724b2e with lease ID 0x8cebb4cf586b7336: from storage DS-7cd13ef2-eb48-4ff5-8115-12ddc853d701 node DatanodeRegistration(127.0.0.1:37501, datanodeUuid=24d472da-0c6b-4dba-adef-a5361d44a88b, infoPort=36479, infoSecurePort=0, ipcPort=40117, storageInfo=lv=-57;cid=testClusterID;nsid=1573474144;c=1733910654907), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T09:50:56,141 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92aa7cdfab724b2e with lease ID 0x8cebb4cf586b7336: Processing first storage report for DS-10d76d00-901d-4912-909d-707ed1b3d894 from datanode DatanodeRegistration(127.0.0.1:37501, datanodeUuid=24d472da-0c6b-4dba-adef-a5361d44a88b, infoPort=36479, infoSecurePort=0, ipcPort=40117, storageInfo=lv=-57;cid=testClusterID;nsid=1573474144;c=1733910654907) 2024-12-11T09:50:56,142 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92aa7cdfab724b2e with lease ID 0x8cebb4cf586b7336: from storage DS-10d76d00-901d-4912-909d-707ed1b3d894 node DatanodeRegistration(127.0.0.1:37501, datanodeUuid=24d472da-0c6b-4dba-adef-a5361d44a88b, infoPort=36479, infoSecurePort=0, ipcPort=40117, storageInfo=lv=-57;cid=testClusterID;nsid=1573474144;c=1733910654907), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T09:50:56,453 WARN [Thread-552 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/data/data3/current/BP-1694945727-172.17.0.2-1733910654907/current, will proceed with Du for space computation calculation, 2024-12-11T09:50:56,453 WARN [Thread-553 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/data/data4/current/BP-1694945727-172.17.0.2-1733910654907/current, will proceed with Du for space computation calculation, 2024-12-11T09:50:56,469 WARN [Thread-504 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T09:50:56,471 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa3582efe1a7c9c11 with lease ID 0x8cebb4cf586b7337: Processing first storage report for DS-e540500a-9c10-4830-8057-7eabbd2139d5 from datanode DatanodeRegistration(127.0.0.1:33295, datanodeUuid=5b35181d-f92d-4a50-a88b-bad24ee9a81e, infoPort=38105, infoSecurePort=0, ipcPort=34283, storageInfo=lv=-57;cid=testClusterID;nsid=1573474144;c=1733910654907) 2024-12-11T09:50:56,471 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3582efe1a7c9c11 with lease ID 0x8cebb4cf586b7337: from storage DS-e540500a-9c10-4830-8057-7eabbd2139d5 node DatanodeRegistration(127.0.0.1:33295, datanodeUuid=5b35181d-f92d-4a50-a88b-bad24ee9a81e, infoPort=38105, infoSecurePort=0, ipcPort=34283, storageInfo=lv=-57;cid=testClusterID;nsid=1573474144;c=1733910654907), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T09:50:56,471 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa3582efe1a7c9c11 with lease ID 0x8cebb4cf586b7337: Processing first storage report for DS-b773701e-f671-4200-a283-02e0b4f8ab7d from datanode DatanodeRegistration(127.0.0.1:33295, datanodeUuid=5b35181d-f92d-4a50-a88b-bad24ee9a81e, infoPort=38105, infoSecurePort=0, ipcPort=34283, storageInfo=lv=-57;cid=testClusterID;nsid=1573474144;c=1733910654907) 2024-12-11T09:50:56,472 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3582efe1a7c9c11 with lease ID 0x8cebb4cf586b7337: from storage DS-b773701e-f671-4200-a283-02e0b4f8ab7d node DatanodeRegistration(127.0.0.1:33295, datanodeUuid=5b35181d-f92d-4a50-a88b-bad24ee9a81e, infoPort=38105, infoSecurePort=0, ipcPort=34283, storageInfo=lv=-57;cid=testClusterID;nsid=1573474144;c=1733910654907), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T09:50:56,544 WARN [Thread-563 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/data/data5/current/BP-1694945727-172.17.0.2-1733910654907/current, will proceed with Du for space computation calculation, 2024-12-11T09:50:56,544 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/data/data6/current/BP-1694945727-172.17.0.2-1733910654907/current, will proceed with Du for space computation calculation, 2024-12-11T09:50:56,561 WARN [Thread-527 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T09:50:56,564 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ece0dbf6ba7cf0f with lease ID 0x8cebb4cf586b7338: Processing first storage report for DS-42e9e465-68a6-4959-afd5-51f5779cfd4a from datanode DatanodeRegistration(127.0.0.1:43797, datanodeUuid=cd262694-4559-4c7d-99d4-f7637c740e15, infoPort=34063, infoSecurePort=0, ipcPort=32871, storageInfo=lv=-57;cid=testClusterID;nsid=1573474144;c=1733910654907) 2024-12-11T09:50:56,564 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ece0dbf6ba7cf0f with lease ID 0x8cebb4cf586b7338: from storage DS-42e9e465-68a6-4959-afd5-51f5779cfd4a node DatanodeRegistration(127.0.0.1:43797, datanodeUuid=cd262694-4559-4c7d-99d4-f7637c740e15, infoPort=34063, infoSecurePort=0, ipcPort=32871, storageInfo=lv=-57;cid=testClusterID;nsid=1573474144;c=1733910654907), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T09:50:56,564 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ece0dbf6ba7cf0f with lease ID 0x8cebb4cf586b7338: Processing first storage report for DS-db35f748-328a-4685-b6ed-28ce22709a9d from datanode DatanodeRegistration(127.0.0.1:43797, datanodeUuid=cd262694-4559-4c7d-99d4-f7637c740e15, infoPort=34063, infoSecurePort=0, ipcPort=32871, storageInfo=lv=-57;cid=testClusterID;nsid=1573474144;c=1733910654907) 2024-12-11T09:50:56,564 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ece0dbf6ba7cf0f with lease ID 0x8cebb4cf586b7338: from storage DS-db35f748-328a-4685-b6ed-28ce22709a9d node DatanodeRegistration(127.0.0.1:43797, datanodeUuid=cd262694-4559-4c7d-99d4-f7637c740e15, infoPort=34063, infoSecurePort=0, ipcPort=32871, storageInfo=lv=-57;cid=testClusterID;nsid=1573474144;c=1733910654907), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T09:50:56,604 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064 2024-12-11T09:50:56,608 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/zookeeper_0, clientPort=63562, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-11T09:50:56,609 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63562 2024-12-11T09:50:56,610 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:56,612 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:56,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741825_1001 (size=7) 2024-12-11T09:50:56,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741825_1001 (size=7) 2024-12-11T09:50:56,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741825_1001 (size=7) 2024-12-11T09:50:56,628 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414 with version=8 2024-12-11T09:50:56,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34879/user/jenkins/test-data/0f1a18a3-705a-ac44-3173-a475130eeaf9/hbase-staging 2024-12-11T09:50:56,631 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3e788d7781dc:0 server-side Connection retries=45 2024-12-11T09:50:56,631 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:56,631 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:56,631 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T09:50:56,631 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:56,631 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T09:50:56,631 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-11T09:50:56,631 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T09:50:56,632 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37097 2024-12-11T09:50:56,634 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37097 connecting to ZooKeeper ensemble=127.0.0.1:63562 2024-12-11T09:50:56,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:370970x0, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T09:50:56,678 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37097-0x100148012df0000 connected 2024-12-11T09:50:56,744 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:56,748 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:56,751 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:56,751 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414, hbase.cluster.distributed=false 2024-12-11T09:50:56,754 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T09:50:56,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37097 2024-12-11T09:50:56,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37097 2024-12-11T09:50:56,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37097 2024-12-11T09:50:56,757 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37097 2024-12-11T09:50:56,757 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37097 2024-12-11T09:50:56,773 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3e788d7781dc:0 server-side Connection retries=45 2024-12-11T09:50:56,773 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:56,773 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:56,773 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T09:50:56,773 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:56,773 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T09:50:56,774 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T09:50:56,774 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T09:50:56,774 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37345 2024-12-11T09:50:56,775 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37345 connecting to ZooKeeper ensemble=127.0.0.1:63562 2024-12-11T09:50:56,776 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:56,778 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:56,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:373450x0, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T09:50:56,791 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37345-0x100148012df0001 connected 2024-12-11T09:50:56,791 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:56,791 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T09:50:56,792 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T09:50:56,793 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T09:50:56,794 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T09:50:56,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37345 2024-12-11T09:50:56,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37345 2024-12-11T09:50:56,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37345 2024-12-11T09:50:56,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37345 2024-12-11T09:50:56,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37345 2024-12-11T09:50:56,815 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3e788d7781dc:0 server-side Connection retries=45 2024-12-11T09:50:56,815 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:56,815 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:56,815 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T09:50:56,815 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:56,815 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T09:50:56,815 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T09:50:56,815 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T09:50:56,816 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46013 2024-12-11T09:50:56,817 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46013 connecting to ZooKeeper ensemble=127.0.0.1:63562 2024-12-11T09:50:56,818 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:56,819 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:56,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:460130x0, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T09:50:56,833 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46013-0x100148012df0002 connected 2024-12-11T09:50:56,833 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:56,833 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T09:50:56,834 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T09:50:56,835 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T09:50:56,837 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T09:50:56,837 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46013 2024-12-11T09:50:56,838 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46013 2024-12-11T09:50:56,838 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46013 2024-12-11T09:50:56,838 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46013 2024-12-11T09:50:56,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46013 2024-12-11T09:50:56,854 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3e788d7781dc:0 server-side Connection retries=45 2024-12-11T09:50:56,854 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:56,854 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:56,855 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T09:50:56,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T09:50:56,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T09:50:56,855 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T09:50:56,855 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T09:50:56,856 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44149 2024-12-11T09:50:56,857 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44149 connecting to ZooKeeper ensemble=127.0.0.1:63562 2024-12-11T09:50:56,857 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:56,859 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:56,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:441490x0, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T09:50:56,868 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:441490x0, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:56,868 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44149-0x100148012df0003 connected 2024-12-11T09:50:56,868 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T09:50:56,869 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T09:50:56,870 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T09:50:56,871 DEBUG [Time-limited 
test {}] zookeeper.ZKUtil(113): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T09:50:56,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44149 2024-12-11T09:50:56,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44149 2024-12-11T09:50:56,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44149 2024-12-11T09:50:56,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44149 2024-12-11T09:50:56,873 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44149 2024-12-11T09:50:56,885 DEBUG [M:0;3e788d7781dc:37097 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3e788d7781dc:37097 2024-12-11T09:50:56,886 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3e788d7781dc,37097,1733910656630 2024-12-11T09:50:56,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:56,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:56,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:56,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:56,893 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3e788d7781dc,37097,1733910656630 2024-12-11T09:50:56,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T09:50:56,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:56,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T09:50:56,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:56,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:56,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T09:50:56,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:56,902 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T09:50:56,902 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3e788d7781dc,37097,1733910656630 from backup master directory 2024-12-11T09:50:56,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3e788d7781dc,37097,1733910656630 2024-12-11T09:50:56,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:56,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:56,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:56,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T09:50:56,909 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
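For orientation: the startup sequence recorded above (a MiniDFSCluster with three datanodes, a MiniZooKeeperCluster, one master bound to port 37097 and three region servers) is what HBaseTestingUtil drives when a test requests a minicluster. A minimal sketch of how a test such as TestHBaseWalOnEC might ask for this topology, assuming the builder-style StartMiniClusterOption API whose fields appear in the logged option string (the method calls are illustrative of that API, not copied from the test source):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();

        // Same shape as the option logged above: 1 master, 3 region servers,
        // 3 datanodes, 1 ZooKeeper server, no pre-created root/WAL dirs.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)
            .numZkServers(1)
            .build();

        util.startMiniCluster(option);   // brings up DFS, ZK, the master and the region servers
        try {
          // ... the test body would read and write through the cluster here ...
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" line seen earlier
        }
      }
    }

Each parameterized test run tears the whole cluster down and builds a fresh one, which is why the shutdown and startup entries above repeat with new data directories.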
2024-12-11T09:50:56,909 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3e788d7781dc,37097,1733910656630 2024-12-11T09:50:56,918 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/hbase.id] with ID: db0ea377-5fa4-43d8-a039-93b96a4d5c63 2024-12-11T09:50:56,918 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/.tmp/hbase.id 2024-12-11T09:50:56,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741826_1002 (size=42) 2024-12-11T09:50:56,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741826_1002 (size=42) 2024-12-11T09:50:56,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741826_1002 (size=42) 2024-12-11T09:50:56,929 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/.tmp/hbase.id]:[hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/hbase.id] 2024-12-11T09:50:56,944 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T09:50:56,944 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-11T09:50:56,945 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
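The cluster ID handling logged just above follows the usual HDFS publish pattern: write the file under a temporary name, then rename it into place so a reader never observes a half-written hbase.id. HBase itself wraps the ID in a small protobuf; the sketch below only shows the write-then-rename pattern with the plain Hadoop FileSystem API (the paths and the random UUID are placeholders, not the values from this run):

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdPublishSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path rootDir = new Path("/hbase");                  // placeholder root dir
        Path tmpId   = new Path(rootDir, ".tmp/hbase.id");  // temporary location
        Path finalId = new Path(rootDir, "hbase.id");       // published location

        String clusterId = UUID.randomUUID().toString();

        // 1. Write the ID to the temporary path first.
        try (FSDataOutputStream out = fs.create(tmpId, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }

        // 2. Rename it to the target location, mirroring the
        //    "Move the temporary cluster ID file" step in the log.
        if (!fs.rename(tmpId, finalId)) {
          throw new java.io.IOException("could not publish " + finalId);
        }
      }
    }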
2024-12-11T09:50:56,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:56,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:56,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:56,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:56,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741827_1003 (size=196) 2024-12-11T09:50:56,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741827_1003 (size=196) 2024-12-11T09:50:56,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741827_1003 (size=196) 2024-12-11T09:50:56,963 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T09:50:56,964 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T09:50:56,964 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T09:50:56,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is 
added to blk_1073741828_1004 (size=1189) 2024-12-11T09:50:56,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741828_1004 (size=1189) 2024-12-11T09:50:56,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741828_1004 (size=1189) 2024-12-11T09:50:56,977 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store 2024-12-11T09:50:56,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741829_1005 (size=34) 2024-12-11T09:50:56,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741829_1005 (size=34) 2024-12-11T09:50:56,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741829_1005 (size=34) 2024-12-11T09:50:56,987 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T09:50:56,987 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T09:50:56,987 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T09:50:56,987 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
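The master:store descriptor dumped above is easier to read as the client-side builder calls that would produce an equivalent layout. A sketch using the public TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API, covering only the 'info' and 'proc' families (the 'rs' and 'state' families use the same settings as 'proc'); this is an illustration of the logged attributes, not the code HBase itself runs when it creates the local master region:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        // 'info': 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build();

        // 'proc': single version, no encoding, ROW bloom, default 64 KB blocks.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();

        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }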
2024-12-11T09:50:56,987 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T09:50:56,987 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T09:50:56,987 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T09:50:56,988 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733910656987Disabling compacts and flushes for region at 1733910656987Disabling writes for close at 1733910656987Writing region close event to WAL at 1733910656987Closed at 1733910656987 2024-12-11T09:50:56,989 WARN [master/3e788d7781dc:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/.initializing 2024-12-11T09:50:56,989 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/WALs/3e788d7781dc,37097,1733910656630 2024-12-11T09:50:56,993 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e788d7781dc%2C37097%2C1733910656630, suffix=, logDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/WALs/3e788d7781dc,37097,1733910656630, archiveDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/oldWALs, maxLogs=10 2024-12-11T09:50:56,994 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3e788d7781dc%2C37097%2C1733910656630.1733910656993 2024-12-11T09:50:57,005 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/WALs/3e788d7781dc,37097,1733910656630/3e788d7781dc%2C37097%2C1733910656630.1733910656993 2024-12-11T09:50:57,008 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36479:36479),(127.0.0.1/127.0.0.1:34063:34063),(127.0.0.1/127.0.0.1:38105:38105)] 2024-12-11T09:50:57,008 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T09:50:57,009 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T09:50:57,009 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:57,009 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:57,011 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:57,014 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T09:50:57,014 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,015 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:57,015 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:57,018 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T09:50:57,018 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,019 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T09:50:57,019 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:57,022 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T09:50:57,022 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,023 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T09:50:57,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:57,026 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T09:50:57,026 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,027 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T09:50:57,027 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:57,028 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:57,028 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:57,030 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:57,030 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:57,030 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T09:50:57,032 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T09:50:57,035 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T09:50:57,036 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71014752, jitterRate=0.058202266693115234}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T09:50:57,036 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733910657009Initializing all the Stores at 1733910657010 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910657011 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733910657011Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733910657011Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733910657011Cleaning up temporary data from old regions at 1733910657030 (+19 ms)Region opened successfully at 1733910657036 (+6 ms) 2024-12-11T09:50:57,037 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T09:50:57,041 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3553fd97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e788d7781dc/172.17.0.2:0 2024-12-11T09:50:57,042 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-11T09:50:57,042 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T09:50:57,042 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T09:50:57,042 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T09:50:57,043 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-11T09:50:57,043 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-11T09:50:57,043 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T09:50:57,046 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-11T09:50:57,047 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T09:50:57,057 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-11T09:50:57,058 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T09:50:57,059 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T09:50:57,067 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-11T09:50:57,068 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T09:50:57,069 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T09:50:57,076 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-11T09:50:57,077 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T09:50:57,084 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T09:50:57,086 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T09:50:57,092 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T09:50:57,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:57,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:57,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:57,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-11T09:50:57,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,102 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3e788d7781dc,37097,1733910656630, sessionid=0x100148012df0000, setting cluster-up flag (Was=false) 2024-12-11T09:50:57,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,143 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T09:50:57,145 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3e788d7781dc,37097,1733910656630 2024-12-11T09:50:57,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,192 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T09:50:57,194 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3e788d7781dc,37097,1733910656630 2024-12-11T09:50:57,195 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-11T09:50:57,198 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-11T09:50:57,198 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-11T09:50:57,198 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-11T09:50:57,199 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3e788d7781dc,37097,1733910656630 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T09:50:57,200 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3e788d7781dc:0, corePoolSize=5, maxPoolSize=5 2024-12-11T09:50:57,201 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3e788d7781dc:0, corePoolSize=5, maxPoolSize=5 2024-12-11T09:50:57,201 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3e788d7781dc:0, corePoolSize=5, maxPoolSize=5 2024-12-11T09:50:57,201 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3e788d7781dc:0, corePoolSize=5, maxPoolSize=5 2024-12-11T09:50:57,201 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3e788d7781dc:0, corePoolSize=10, maxPoolSize=10 2024-12-11T09:50:57,201 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,201 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3e788d7781dc:0, corePoolSize=2, maxPoolSize=2 2024-12-11T09:50:57,201 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,202 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733910687202 2024-12-11T09:50:57,202 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T09:50:57,202 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T09:50:57,202 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T09:50:57,202 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T09:50:57,202 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T09:50:57,202 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T09:50:57,202 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,203 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T09:50:57,203 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T09:50:57,203 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T09:50:57,203 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T09:50:57,204 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T09:50:57,204 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3e788d7781dc:0:becomeActiveMaster-HFileCleaner.large.0-1733910657204,5,FailOnTimeoutGroup] 2024-12-11T09:50:57,204 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T09:50:57,204 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-11T09:50:57,204 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3e788d7781dc:0:becomeActiveMaster-HFileCleaner.small.0-1733910657204,5,FailOnTimeoutGroup] 2024-12-11T09:50:57,204 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,204 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T09:50:57,204 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,204 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-11T09:50:57,206 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,206 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T09:50:57,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741831_1007 (size=1321) 2024-12-11T09:50:57,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741831_1007 (size=1321) 2024-12-11T09:50:57,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741831_1007 (size=1321) 2024-12-11T09:50:57,218 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-11T09:50:57,219 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414 2024-12-11T09:50:57,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741832_1008 (size=32) 2024-12-11T09:50:57,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741832_1008 (size=32) 2024-12-11T09:50:57,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741832_1008 (size=32) 2024-12-11T09:50:57,231 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T09:50:57,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T09:50:57,233 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T09:50:57,233 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:57,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T09:50:57,235 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T09:50:57,236 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,236 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:57,236 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T09:50:57,238 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T09:50:57,238 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:57,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T09:50:57,240 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T09:50:57,241 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:57,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T09:50:57,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740 2024-12-11T09:50:57,243 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740 2024-12-11T09:50:57,244 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T09:50:57,245 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T09:50:57,245 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T09:50:57,246 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T09:50:57,249 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T09:50:57,249 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66905042, jitterRate=-0.003037184476852417}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T09:50:57,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733910657231Initializing all the Stores at 1733910657232 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910657232Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910657232Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733910657232Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910657232Cleaning up temporary data from old regions at 1733910657245 (+13 ms)Region opened successfully at 1733910657250 (+5 ms) 2024-12-11T09:50:57,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T09:50:57,250 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T09:50:57,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T09:50:57,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T09:50:57,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T09:50:57,250 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T09:50:57,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733910657250Disabling compacts and flushes for region at 1733910657250Disabling writes for close at 1733910657250Writing region close event to WAL at 1733910657250Closed at 1733910657250 2024-12-11T09:50:57,252 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T09:50:57,252 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-11T09:50:57,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T09:50:57,254 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T09:50:57,255 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T09:50:57,275 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(746): ClusterId : db0ea377-5fa4-43d8-a039-93b96a4d5c63 2024-12-11T09:50:57,275 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(746): ClusterId : db0ea377-5fa4-43d8-a039-93b96a4d5c63 2024-12-11T09:50:57,275 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.HRegionServer(746): ClusterId : db0ea377-5fa4-43d8-a039-93b96a4d5c63 2024-12-11T09:50:57,275 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T09:50:57,275 
DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T09:50:57,275 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T09:50:57,285 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T09:50:57,285 DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T09:50:57,285 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T09:50:57,286 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T09:50:57,286 DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T09:50:57,286 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T09:50:57,302 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T09:50:57,303 DEBUG [RS:2;3e788d7781dc:44149 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79347ba4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e788d7781dc/172.17.0.2:0 2024-12-11T09:50:57,303 DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T09:50:57,303 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T09:50:57,304 DEBUG [RS:0;3e788d7781dc:37345 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29b98e12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e788d7781dc/172.17.0.2:0 2024-12-11T09:50:57,304 DEBUG [RS:1;3e788d7781dc:46013 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3786af13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e788d7781dc/172.17.0.2:0 2024-12-11T09:50:57,317 DEBUG [RS:2;3e788d7781dc:44149 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;3e788d7781dc:44149 2024-12-11T09:50:57,317 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T09:50:57,317 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T09:50:57,317 DEBUG [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-11T09:50:57,318 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(2659): reportForDuty to master=3e788d7781dc,37097,1733910656630 with port=44149, startcode=1733910656854 2024-12-11T09:50:57,318 DEBUG [RS:2;3e788d7781dc:44149 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T09:50:57,320 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38947, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T09:50:57,321 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37097 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3e788d7781dc,44149,1733910656854 2024-12-11T09:50:57,321 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37097 {}] master.ServerManager(517): Registering regionserver=3e788d7781dc,44149,1733910656854 2024-12-11T09:50:57,323 DEBUG [RS:0;3e788d7781dc:37345 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3e788d7781dc:37345 2024-12-11T09:50:57,323 DEBUG [RS:1;3e788d7781dc:46013 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3e788d7781dc:46013 2024-12-11T09:50:57,323 DEBUG [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414 2024-12-11T09:50:57,323 DEBUG [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42845 2024-12-11T09:50:57,323 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T09:50:57,323 DEBUG [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T09:50:57,323 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T09:50:57,323 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T09:50:57,323 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T09:50:57,323 DEBUG [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T09:50:57,323 DEBUG [RS:1;3e788d7781dc:46013 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-11T09:50:57,324 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(2659): reportForDuty to master=3e788d7781dc,37097,1733910656630 with port=37345, startcode=1733910656773 2024-12-11T09:50:57,324 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.HRegionServer(2659): reportForDuty to master=3e788d7781dc,37097,1733910656630 with port=46013, startcode=1733910656814 2024-12-11T09:50:57,324 DEBUG [RS:1;3e788d7781dc:46013 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T09:50:57,324 DEBUG [RS:0;3e788d7781dc:37345 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T09:50:57,326 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48489, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T09:50:57,326 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42573, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T09:50:57,327 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37097 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3e788d7781dc,46013,1733910656814 2024-12-11T09:50:57,327 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37097 {}] master.ServerManager(517): Registering regionserver=3e788d7781dc,46013,1733910656814 2024-12-11T09:50:57,328 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37097 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3e788d7781dc,37345,1733910656773 2024-12-11T09:50:57,329 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37097 {}] master.ServerManager(517): Registering regionserver=3e788d7781dc,37345,1733910656773 2024-12-11T09:50:57,329 DEBUG [RS:1;3e788d7781dc:46013 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414 2024-12-11T09:50:57,329 DEBUG [RS:1;3e788d7781dc:46013 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42845 2024-12-11T09:50:57,329 DEBUG [RS:1;3e788d7781dc:46013 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T09:50:57,330 DEBUG [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414 2024-12-11T09:50:57,330 DEBUG [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42845 2024-12-11T09:50:57,330 DEBUG [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T09:50:57,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T09:50:57,356 DEBUG [RS:2;3e788d7781dc:44149 {}] zookeeper.ZKUtil(111): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3e788d7781dc,44149,1733910656854 2024-12-11T09:50:57,356 WARN [RS:2;3e788d7781dc:44149 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not 
set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T09:50:57,356 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3e788d7781dc,37345,1733910656773] 2024-12-11T09:50:57,356 INFO [RS:2;3e788d7781dc:44149 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T09:50:57,356 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3e788d7781dc,46013,1733910656814] 2024-12-11T09:50:57,356 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3e788d7781dc,44149,1733910656854] 2024-12-11T09:50:57,356 DEBUG [RS:1;3e788d7781dc:46013 {}] zookeeper.ZKUtil(111): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3e788d7781dc,46013,1733910656814 2024-12-11T09:50:57,356 DEBUG [RS:0;3e788d7781dc:37345 {}] zookeeper.ZKUtil(111): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3e788d7781dc,37345,1733910656773 2024-12-11T09:50:57,356 DEBUG [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/WALs/3e788d7781dc,44149,1733910656854 2024-12-11T09:50:57,356 WARN [RS:1;3e788d7781dc:46013 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T09:50:57,356 WARN [RS:0;3e788d7781dc:37345 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-11T09:50:57,356 INFO [RS:1;3e788d7781dc:46013 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T09:50:57,356 INFO [RS:0;3e788d7781dc:37345 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T09:50:57,356 DEBUG [RS:1;3e788d7781dc:46013 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/WALs/3e788d7781dc,46013,1733910656814 2024-12-11T09:50:57,357 DEBUG [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/WALs/3e788d7781dc,37345,1733910656773 2024-12-11T09:50:57,363 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T09:50:57,363 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T09:50:57,366 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T09:50:57,366 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T09:50:57,368 INFO [RS:1;3e788d7781dc:46013 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T09:50:57,368 INFO [RS:1;3e788d7781dc:46013 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,368 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T09:50:57,369 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T09:50:57,369 INFO [RS:2;3e788d7781dc:44149 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T09:50:57,369 INFO [RS:1;3e788d7781dc:46013 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T09:50:57,369 INFO [RS:2;3e788d7781dc:44149 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,369 INFO [RS:1;3e788d7781dc:46013 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-11T09:50:57,370 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T09:50:57,370 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,370 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,370 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,370 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,370 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,370 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3e788d7781dc:0, corePoolSize=2, maxPoolSize=2 2024-12-11T09:50:57,370 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,371 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,371 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,371 INFO [RS:2;3e788d7781dc:44149 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T09:50:57,371 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,371 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,371 INFO [RS:2;3e788d7781dc:44149 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-11T09:50:57,371 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,371 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T09:50:57,371 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,371 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3e788d7781dc:0, corePoolSize=3, maxPoolSize=3 2024-12-11T09:50:57,371 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,371 DEBUG [RS:1;3e788d7781dc:46013 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0, corePoolSize=3, maxPoolSize=3 2024-12-11T09:50:57,371 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,371 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,372 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,372 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3e788d7781dc:0, corePoolSize=2, maxPoolSize=2 2024-12-11T09:50:57,372 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,372 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,372 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,372 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,372 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,372 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,372 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3e788d7781dc:0, corePoolSize=3, maxPoolSize=3 2024-12-11T09:50:57,372 DEBUG [RS:2;3e788d7781dc:44149 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0, 
corePoolSize=3, maxPoolSize=3 2024-12-11T09:50:57,374 INFO [RS:0;3e788d7781dc:37345 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T09:50:57,374 INFO [RS:0;3e788d7781dc:37345 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,374 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T09:50:57,375 INFO [RS:1;3e788d7781dc:46013 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,375 INFO [RS:2;3e788d7781dc:44149 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,375 INFO [RS:1;3e788d7781dc:46013 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,375 INFO [RS:2;3e788d7781dc:44149 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,375 INFO [RS:1;3e788d7781dc:46013 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,375 INFO [RS:2;3e788d7781dc:44149 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,375 INFO [RS:1;3e788d7781dc:46013 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,375 INFO [RS:2;3e788d7781dc:44149 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,375 INFO [RS:1;3e788d7781dc:46013 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,375 INFO [RS:2;3e788d7781dc:44149 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,375 INFO [RS:1;3e788d7781dc:46013 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,46013,1733910656814-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T09:50:57,375 INFO [RS:2;3e788d7781dc:44149 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,44149,1733910656854-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T09:50:57,375 INFO [RS:0;3e788d7781dc:37345 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T09:50:57,376 INFO [RS:0;3e788d7781dc:37345 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-11T09:50:57,376 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,376 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,376 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,376 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,376 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,376 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3e788d7781dc:0, corePoolSize=2, maxPoolSize=2 2024-12-11T09:50:57,376 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,376 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,377 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,377 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,377 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,377 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3e788d7781dc:0, corePoolSize=1, maxPoolSize=1 2024-12-11T09:50:57,377 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3e788d7781dc:0, corePoolSize=3, maxPoolSize=3 2024-12-11T09:50:57,377 DEBUG [RS:0;3e788d7781dc:37345 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0, corePoolSize=3, maxPoolSize=3 2024-12-11T09:50:57,377 INFO [RS:0;3e788d7781dc:37345 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,377 INFO [RS:0;3e788d7781dc:37345 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,377 INFO [RS:0;3e788d7781dc:37345 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,378 INFO [RS:0;3e788d7781dc:37345 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-11T09:50:57,378 INFO [RS:0;3e788d7781dc:37345 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,378 INFO [RS:0;3e788d7781dc:37345 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,37345,1733910656773-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T09:50:57,388 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T09:50:57,388 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T09:50:57,389 INFO [RS:1;3e788d7781dc:46013 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,46013,1733910656814-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,389 INFO [RS:2;3e788d7781dc:44149 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,44149,1733910656854-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,389 INFO [RS:2;3e788d7781dc:44149 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,389 INFO [RS:1;3e788d7781dc:46013 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,389 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.Replication(171): 3e788d7781dc,44149,1733910656854 started 2024-12-11T09:50:57,389 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.Replication(171): 3e788d7781dc,46013,1733910656814 started 2024-12-11T09:50:57,389 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T09:50:57,390 INFO [RS:0;3e788d7781dc:37345 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,37345,1733910656773-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,390 INFO [RS:0;3e788d7781dc:37345 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,390 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.Replication(171): 3e788d7781dc,37345,1733910656773 started 2024-12-11T09:50:57,401 INFO [RS:1;3e788d7781dc:46013 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,401 INFO [RS:2;3e788d7781dc:44149 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,401 INFO [RS:0;3e788d7781dc:37345 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T09:50:57,401 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(1482): Serving as 3e788d7781dc,44149,1733910656854, RpcServer on 3e788d7781dc/172.17.0.2:44149, sessionid=0x100148012df0003 2024-12-11T09:50:57,401 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.HRegionServer(1482): Serving as 3e788d7781dc,46013,1733910656814, RpcServer on 3e788d7781dc/172.17.0.2:46013, sessionid=0x100148012df0002 2024-12-11T09:50:57,401 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(1482): Serving as 3e788d7781dc,37345,1733910656773, RpcServer on 3e788d7781dc/172.17.0.2:37345, sessionid=0x100148012df0001 2024-12-11T09:50:57,401 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T09:50:57,401 DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T09:50:57,401 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T09:50:57,401 DEBUG [RS:2;3e788d7781dc:44149 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3e788d7781dc,44149,1733910656854 2024-12-11T09:50:57,401 DEBUG [RS:0;3e788d7781dc:37345 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3e788d7781dc,37345,1733910656773 2024-12-11T09:50:57,401 DEBUG [RS:1;3e788d7781dc:46013 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3e788d7781dc,46013,1733910656814 2024-12-11T09:50:57,401 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e788d7781dc,44149,1733910656854' 2024-12-11T09:50:57,401 DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e788d7781dc,37345,1733910656773' 2024-12-11T09:50:57,401 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e788d7781dc,46013,1733910656814' 2024-12-11T09:50:57,401 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T09:50:57,401 DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T09:50:57,401 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T09:50:57,402 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T09:50:57,402 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T09:50:57,402 DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T09:50:57,402 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T09:50:57,402 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T09:50:57,402 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 
2024-12-11T09:50:57,402 DEBUG [RS:1;3e788d7781dc:46013 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3e788d7781dc,46013,1733910656814 2024-12-11T09:50:57,402 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T09:50:57,402 DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T09:50:57,402 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e788d7781dc,46013,1733910656814' 2024-12-11T09:50:57,403 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T09:50:57,403 DEBUG [RS:2;3e788d7781dc:44149 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3e788d7781dc,44149,1733910656854 2024-12-11T09:50:57,403 DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T09:50:57,403 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e788d7781dc,44149,1733910656854' 2024-12-11T09:50:57,403 DEBUG [RS:0;3e788d7781dc:37345 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3e788d7781dc,37345,1733910656773 2024-12-11T09:50:57,403 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T09:50:57,403 DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e788d7781dc,37345,1733910656773' 2024-12-11T09:50:57,403 DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T09:50:57,403 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T09:50:57,403 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T09:50:57,403 DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T09:50:57,403 DEBUG [RS:1;3e788d7781dc:46013 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T09:50:57,403 INFO [RS:1;3e788d7781dc:46013 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T09:50:57,403 INFO [RS:1;3e788d7781dc:46013 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T09:50:57,403 DEBUG [RS:2;3e788d7781dc:44149 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T09:50:57,403 DEBUG [RS:0;3e788d7781dc:37345 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T09:50:57,403 INFO [RS:2;3e788d7781dc:44149 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T09:50:57,403 INFO [RS:0;3e788d7781dc:37345 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T09:50:57,403 INFO [RS:2;3e788d7781dc:44149 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-11T09:50:57,403 INFO [RS:0;3e788d7781dc:37345 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T09:50:57,405 WARN [3e788d7781dc:37097 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-11T09:50:57,509 INFO [RS:1;3e788d7781dc:46013 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e788d7781dc%2C46013%2C1733910656814, suffix=, logDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/WALs/3e788d7781dc,46013,1733910656814, archiveDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/oldWALs, maxLogs=32 2024-12-11T09:50:57,509 INFO [RS:0;3e788d7781dc:37345 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e788d7781dc%2C37345%2C1733910656773, suffix=, logDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/WALs/3e788d7781dc,37345,1733910656773, archiveDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/oldWALs, maxLogs=32 2024-12-11T09:50:57,509 INFO [RS:2;3e788d7781dc:44149 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e788d7781dc%2C44149%2C1733910656854, suffix=, logDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/WALs/3e788d7781dc,44149,1733910656854, archiveDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/oldWALs, maxLogs=32 2024-12-11T09:50:57,513 INFO [RS:1;3e788d7781dc:46013 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3e788d7781dc%2C46013%2C1733910656814.1733910657513 2024-12-11T09:50:57,513 INFO [RS:0;3e788d7781dc:37345 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3e788d7781dc%2C37345%2C1733910656773.1733910657513 2024-12-11T09:50:57,514 INFO [RS:2;3e788d7781dc:44149 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3e788d7781dc%2C44149%2C1733910656854.1733910657514 2024-12-11T09:50:57,520 INFO [RS:1;3e788d7781dc:46013 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/WALs/3e788d7781dc,46013,1733910656814/3e788d7781dc%2C46013%2C1733910656814.1733910657513 2024-12-11T09:50:57,522 INFO [RS:0;3e788d7781dc:37345 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/WALs/3e788d7781dc,37345,1733910656773/3e788d7781dc%2C37345%2C1733910656773.1733910657513 2024-12-11T09:50:57,523 INFO [RS:2;3e788d7781dc:44149 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/WALs/3e788d7781dc,44149,1733910656854/3e788d7781dc%2C44149%2C1733910656854.1733910657514 2024-12-11T09:50:57,526 DEBUG [RS:1;3e788d7781dc:46013 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34063:34063),(127.0.0.1/127.0.0.1:36479:36479),(127.0.0.1/127.0.0.1:38105:38105)] 2024-12-11T09:50:57,527 DEBUG [RS:0;3e788d7781dc:37345 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34063:34063),(127.0.0.1/127.0.0.1:36479:36479),(127.0.0.1/127.0.0.1:38105:38105)] 2024-12-11T09:50:57,528 DEBUG [RS:2;3e788d7781dc:44149 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34063:34063),(127.0.0.1/127.0.0.1:36479:36479),(127.0.0.1/127.0.0.1:38105:38105)] 2024-12-11T09:50:57,656 DEBUG [3e788d7781dc:37097 
{}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-11T09:50:57,656 DEBUG [3e788d7781dc:37097 {}] balancer.BalancerClusterState(204): Hosts are {3e788d7781dc=0} racks are {/default-rack=0} 2024-12-11T09:50:57,658 DEBUG [3e788d7781dc:37097 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T09:50:57,658 DEBUG [3e788d7781dc:37097 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T09:50:57,658 DEBUG [3e788d7781dc:37097 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T09:50:57,658 DEBUG [3e788d7781dc:37097 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T09:50:57,658 DEBUG [3e788d7781dc:37097 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T09:50:57,658 DEBUG [3e788d7781dc:37097 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T09:50:57,658 INFO [3e788d7781dc:37097 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T09:50:57,658 INFO [3e788d7781dc:37097 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T09:50:57,658 INFO [3e788d7781dc:37097 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T09:50:57,658 DEBUG [3e788d7781dc:37097 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T09:50:57,659 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3e788d7781dc,37345,1733910656773 2024-12-11T09:50:57,660 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3e788d7781dc,37345,1733910656773, state=OPENING 2024-12-11T09:50:57,674 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T09:50:57,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:57,686 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T09:50:57,686 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:57,686 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 
2024-12-11T09:50:57,686 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:57,686 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3e788d7781dc,37345,1733910656773}] 2024-12-11T09:50:57,687 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:57,843 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T09:50:57,846 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36335, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T09:50:57,854 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-11T09:50:57,854 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T09:50:57,857 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e788d7781dc%2C37345%2C1733910656773.meta, suffix=.meta, logDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/WALs/3e788d7781dc,37345,1733910656773, archiveDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/oldWALs, maxLogs=32 2024-12-11T09:50:57,859 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3e788d7781dc%2C37345%2C1733910656773.meta.1733910657859.meta 2024-12-11T09:50:57,869 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/WALs/3e788d7781dc,37345,1733910656773/3e788d7781dc%2C37345%2C1733910656773.meta.1733910657859.meta 2024-12-11T09:50:57,870 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36479:36479),(127.0.0.1/127.0.0.1:38105:38105),(127.0.0.1/127.0.0.1:34063:34063)] 2024-12-11T09:50:57,871 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T09:50:57,871 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T09:50:57,871 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T09:50:57,871 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-11T09:50:57,871 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T09:50:57,871 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T09:50:57,872 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-11T09:50:57,872 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-11T09:50:57,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T09:50:57,875 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T09:50:57,875 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:57,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T09:50:57,877 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
1588230740 columnFamilyName ns 2024-12-11T09:50:57,877 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,877 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:57,877 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T09:50:57,878 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T09:50:57,878 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:57,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T09:50:57,880 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T09:50:57,880 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,881 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T09:50:57,881 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T09:50:57,882 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740 2024-12-11T09:50:57,883 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740 2024-12-11T09:50:57,885 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T09:50:57,885 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T09:50:57,885 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T09:50:57,887 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T09:50:57,888 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66759181, jitterRate=-0.00521068274974823}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T09:50:57,888 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-11T09:50:57,889 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733910657872Writing region info on filesystem at 1733910657872Initializing all the Stores at 1733910657873 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910657873Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910657874 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733910657874Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733910657874Cleaning up temporary data from old regions at 1733910657885 (+11 ms)Running coprocessor post-open hooks at 1733910657888 (+3 ms)Region opened successfully at 1733910657888 2024-12-11T09:50:57,890 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733910657842 2024-12-11T09:50:57,893 DEBUG [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T09:50:57,893 INFO [RS_OPEN_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-11T09:50:57,894 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3e788d7781dc,37345,1733910656773 2024-12-11T09:50:57,895 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3e788d7781dc,37345,1733910656773, state=OPEN 2024-12-11T09:50:57,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T09:50:57,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T09:50:57,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T09:50:57,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T09:50:57,934 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3e788d7781dc,37345,1733910656773 2024-12-11T09:50:57,934 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:57,934 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:57,934 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path 
/hbase/meta-region-server: CHANGED 2024-12-11T09:50:57,934 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T09:50:57,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T09:50:57,941 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3e788d7781dc,37345,1733910656773 in 248 msec 2024-12-11T09:50:57,946 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T09:50:57,946 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 689 msec 2024-12-11T09:50:57,947 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T09:50:57,947 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-11T09:50:57,950 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T09:50:57,950 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3e788d7781dc,37345,1733910656773, seqNum=-1] 2024-12-11T09:50:57,950 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T09:50:57,952 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55259, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T09:50:57,963 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 763 msec 2024-12-11T09:50:57,963 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733910657963, completionTime=-1 2024-12-11T09:50:57,963 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-11T09:50:57,963 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-11T09:50:57,966 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-11T09:50:57,966 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733910717966 2024-12-11T09:50:57,966 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733910777966 2024-12-11T09:50:57,966 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-11T09:50:57,966 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-11T09:50:57,967 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,37097,1733910656630-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,967 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,37097,1733910656630-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,967 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,37097,1733910656630-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,967 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3e788d7781dc:37097, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,967 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,968 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,970 DEBUG [master/3e788d7781dc:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T09:50:57,973 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.063sec 2024-12-11T09:50:57,973 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T09:50:57,973 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T09:50:57,973 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T09:50:57,973 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-11T09:50:57,973 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T09:50:57,973 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,37097,1733910656630-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T09:50:57,973 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,37097,1733910656630-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T09:50:57,975 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e30ec79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T09:50:57,975 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3e788d7781dc,37097,-1 for getting cluster id 2024-12-11T09:50:57,976 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T09:50:57,976 DEBUG [master/3e788d7781dc:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-11T09:50:57,976 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T09:50:57,976 INFO [master/3e788d7781dc:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e788d7781dc,37097,1733910656630-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T09:50:57,977 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'db0ea377-5fa4-43d8-a039-93b96a4d5c63' 2024-12-11T09:50:57,978 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T09:50:57,978 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "db0ea377-5fa4-43d8-a039-93b96a4d5c63" 2024-12-11T09:50:57,978 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a3fb4d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T09:50:57,978 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3e788d7781dc,37097,-1] 2024-12-11T09:50:57,979 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T09:50:57,979 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:57,980 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59444, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T09:50:57,982 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fe7973b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T09:50:57,982 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T09:50:57,983 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3e788d7781dc,37345,1733910656773, seqNum=-1] 2024-12-11T09:50:57,984 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T09:50:57,986 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44262, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T09:50:57,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3e788d7781dc,37097,1733910656630 2024-12-11T09:50:57,989 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T09:50:57,990 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 3e788d7781dc,37097,1733910656630 2024-12-11T09:50:57,991 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@508ed88a 2024-12-11T09:50:57,991 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T09:50:57,993 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59456, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T09:50:57,993 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37097 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T09:50:57,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37097 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-11T09:50:57,997 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T09:50:57,998 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:57,998 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37097 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-11T09:50:57,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37097 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T09:50:57,999 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T09:50:58,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741837_1013 (size=392) 2024-12-11T09:50:58,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741837_1013 (size=392) 2024-12-11T09:50:58,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741837_1013 (size=392) 2024-12-11T09:50:58,012 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ad65097553d5debff68e488fe2df550b, NAME => 'TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414 2024-12-11T09:50:58,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741838_1014 (size=51) 2024-12-11T09:50:58,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741838_1014 (size=51) 2024-12-11T09:50:58,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741838_1014 (size=51) 2024-12-11T09:50:58,022 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T09:50:58,022 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing ad65097553d5debff68e488fe2df550b, disabling compactions & flushes 2024-12-11T09:50:58,022 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 2024-12-11T09:50:58,022 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 2024-12-11T09:50:58,022 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. after waiting 0 ms 2024-12-11T09:50:58,022 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 2024-12-11T09:50:58,022 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 
2024-12-11T09:50:58,022 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for ad65097553d5debff68e488fe2df550b: Waiting for close lock at 1733910658022Disabling compacts and flushes for region at 1733910658022Disabling writes for close at 1733910658022Writing region close event to WAL at 1733910658022Closed at 1733910658022 2024-12-11T09:50:58,024 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T09:50:58,024 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733910658024"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733910658024"}]},"ts":"1733910658024"} 2024-12-11T09:50:58,028 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-11T09:50:58,030 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T09:50:58,030 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733910658030"}]},"ts":"1733910658030"} 2024-12-11T09:50:58,034 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-11T09:50:58,034 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {3e788d7781dc=0} racks are {/default-rack=0} 2024-12-11T09:50:58,035 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T09:50:58,035 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T09:50:58,035 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T09:50:58,035 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T09:50:58,035 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T09:50:58,035 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T09:50:58,035 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T09:50:58,035 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T09:50:58,036 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T09:50:58,036 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T09:50:58,036 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ad65097553d5debff68e488fe2df550b, ASSIGN}] 2024-12-11T09:50:58,038 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ad65097553d5debff68e488fe2df550b, ASSIGN 2024-12-11T09:50:58,040 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ad65097553d5debff68e488fe2df550b, ASSIGN; state=OFFLINE, location=3e788d7781dc,44149,1733910656854; forceNewPlan=false, retain=false 2024-12-11T09:50:58,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37097 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T09:50:58,191 INFO [3e788d7781dc:37097 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-11T09:50:58,191 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ad65097553d5debff68e488fe2df550b, regionState=OPENING, regionLocation=3e788d7781dc,44149,1733910656854 2024-12-11T09:50:58,196 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ad65097553d5debff68e488fe2df550b, ASSIGN because future has completed 2024-12-11T09:50:58,197 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ad65097553d5debff68e488fe2df550b, server=3e788d7781dc,44149,1733910656854}] 2024-12-11T09:50:58,279 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-11T09:50:58,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T09:50:58,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37097 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T09:50:58,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T09:50:58,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T09:50:58,354 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T09:50:58,356 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33681, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T09:50:58,361 INFO [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 
2024-12-11T09:50:58,362 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ad65097553d5debff68e488fe2df550b, NAME => 'TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b.', STARTKEY => '', ENDKEY => ''} 2024-12-11T09:50:58,362 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC ad65097553d5debff68e488fe2df550b 2024-12-11T09:50:58,362 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T09:50:58,363 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ad65097553d5debff68e488fe2df550b 2024-12-11T09:50:58,363 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ad65097553d5debff68e488fe2df550b 2024-12-11T09:50:58,365 INFO [StoreOpener-ad65097553d5debff68e488fe2df550b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ad65097553d5debff68e488fe2df550b 2024-12-11T09:50:58,368 INFO [StoreOpener-ad65097553d5debff68e488fe2df550b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ad65097553d5debff68e488fe2df550b columnFamilyName cf 2024-12-11T09:50:58,368 DEBUG [StoreOpener-ad65097553d5debff68e488fe2df550b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T09:50:58,369 INFO [StoreOpener-ad65097553d5debff68e488fe2df550b-1 {}] regionserver.HStore(327): Store=ad65097553d5debff68e488fe2df550b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T09:50:58,369 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ad65097553d5debff68e488fe2df550b 2024-12-11T09:50:58,370 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/default/TestHBaseWalOnEC/ad65097553d5debff68e488fe2df550b 2024-12-11T09:50:58,370 DEBUG 
[RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/default/TestHBaseWalOnEC/ad65097553d5debff68e488fe2df550b 2024-12-11T09:50:58,371 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ad65097553d5debff68e488fe2df550b 2024-12-11T09:50:58,371 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ad65097553d5debff68e488fe2df550b 2024-12-11T09:50:58,373 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ad65097553d5debff68e488fe2df550b 2024-12-11T09:50:58,375 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/default/TestHBaseWalOnEC/ad65097553d5debff68e488fe2df550b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T09:50:58,376 INFO [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ad65097553d5debff68e488fe2df550b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65240312, jitterRate=-0.027843594551086426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T09:50:58,376 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ad65097553d5debff68e488fe2df550b 2024-12-11T09:50:58,377 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ad65097553d5debff68e488fe2df550b: Running coprocessor pre-open hook at 1733910658363Writing region info on filesystem at 1733910658363Initializing all the Stores at 1733910658364 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733910658365 (+1 ms)Cleaning up temporary data from old regions at 1733910658371 (+6 ms)Running coprocessor post-open hooks at 1733910658376 (+5 ms)Region opened successfully at 1733910658377 (+1 ms) 2024-12-11T09:50:58,378 INFO [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b., pid=6, masterSystemTime=1733910658353 2024-12-11T09:50:58,382 DEBUG [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 2024-12-11T09:50:58,382 INFO [RS_OPEN_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 
2024-12-11T09:50:58,383 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ad65097553d5debff68e488fe2df550b, regionState=OPEN, openSeqNum=2, regionLocation=3e788d7781dc,44149,1733910656854 2024-12-11T09:50:58,386 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ad65097553d5debff68e488fe2df550b, server=3e788d7781dc,44149,1733910656854 because future has completed 2024-12-11T09:50:58,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T09:50:58,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ad65097553d5debff68e488fe2df550b, server=3e788d7781dc,44149,1733910656854 in 191 msec 2024-12-11T09:50:58,397 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T09:50:58,397 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ad65097553d5debff68e488fe2df550b, ASSIGN in 357 msec 2024-12-11T09:50:58,398 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T09:50:58,398 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733910658398"}]},"ts":"1733910658398"} 2024-12-11T09:50:58,401 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-11T09:50:58,403 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T09:50:58,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 409 msec 2024-12-11T09:50:58,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37097 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T09:50:58,625 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T09:50:58,625 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-11T09:50:58,626 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T09:50:58,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-11T09:50:58,633 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T09:50:58,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-12-11T09:50:58,638 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b., hostname=3e788d7781dc,44149,1733910656854, seqNum=2] 2024-12-11T09:50:58,638 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T09:50:58,640 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38748, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T09:50:58,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37097 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-11T09:50:58,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37097 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-11T09:50:58,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37097 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T09:50:58,649 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-11T09:50:58,651 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T09:50:58,651 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T09:50:58,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37097 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T09:50:58,805 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44149 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-11T09:50:58,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 
2024-12-11T09:50:58,806 INFO [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing ad65097553d5debff68e488fe2df550b 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-11T09:50:58,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/default/TestHBaseWalOnEC/ad65097553d5debff68e488fe2df550b/.tmp/cf/e182ce7cf9494b139ad7300ad746186e is 36, key is row/cf:cq/1733910658641/Put/seqid=0 2024-12-11T09:50:58,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741839_1015 (size=4787) 2024-12-11T09:50:58,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741839_1015 (size=4787) 2024-12-11T09:50:58,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741839_1015 (size=4787) 2024-12-11T09:50:58,831 INFO [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/default/TestHBaseWalOnEC/ad65097553d5debff68e488fe2df550b/.tmp/cf/e182ce7cf9494b139ad7300ad746186e 2024-12-11T09:50:58,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/default/TestHBaseWalOnEC/ad65097553d5debff68e488fe2df550b/.tmp/cf/e182ce7cf9494b139ad7300ad746186e as hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/default/TestHBaseWalOnEC/ad65097553d5debff68e488fe2df550b/cf/e182ce7cf9494b139ad7300ad746186e 2024-12-11T09:50:58,850 INFO [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/default/TestHBaseWalOnEC/ad65097553d5debff68e488fe2df550b/cf/e182ce7cf9494b139ad7300ad746186e, entries=1, sequenceid=5, filesize=4.7 K 2024-12-11T09:50:58,852 INFO [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for ad65097553d5debff68e488fe2df550b in 46ms, sequenceid=5, compaction requested=false 2024-12-11T09:50:58,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for ad65097553d5debff68e488fe2df550b: 2024-12-11T09:50:58,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 
2024-12-11T09:50:58,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e788d7781dc:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-11T09:50:58,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37097 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-11T09:50:58,859 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-11T09:50:58,859 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 204 msec 2024-12-11T09:50:58,863 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 216 msec 2024-12-11T09:50:58,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37097 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T09:50:58,966 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T09:50:58,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-11T09:50:58,975 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T09:50:58,975 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T09:50:58,975 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:58,975 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:58,975 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T09:50:58,975 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-11T09:50:58,975 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1820564016, stopped=false 2024-12-11T09:50:58,975 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3e788d7781dc,37097,1733910656630 2024-12-11T09:50:59,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:59,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:59,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:59,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T09:50:59,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:59,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, 
quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:59,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:59,018 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T09:50:59,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:59,019 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T09:50:59,019 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T09:50:59,019 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:59,020 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:59,020 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3e788d7781dc,37345,1733910656773' ***** 2024-12-11T09:50:59,020 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:59,020 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T09:50:59,020 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:59,020 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3e788d7781dc,46013,1733910656814' ***** 2024-12-11T09:50:59,020 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T09:50:59,021 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T09:50:59,021 INFO [RS:0;3e788d7781dc:37345 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T09:50:59,021 INFO [RS:0;3e788d7781dc:37345 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T09:50:59,021 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(959): stopping server 3e788d7781dc,37345,1733910656773 2024-12-11T09:50:59,021 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T09:50:59,021 INFO [RS:0;3e788d7781dc:37345 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T09:50:59,022 INFO [RS:1;3e788d7781dc:46013 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-11T09:50:59,022 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3e788d7781dc,44149,1733910656854' ***** 2024-12-11T09:50:59,022 INFO [RS:1;3e788d7781dc:46013 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T09:50:59,022 INFO [RS:0;3e788d7781dc:37345 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3e788d7781dc:37345. 2024-12-11T09:50:59,022 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T09:50:59,022 DEBUG [RS:0;3e788d7781dc:37345 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T09:50:59,022 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.HRegionServer(959): stopping server 3e788d7781dc,46013,1733910656814 2024-12-11T09:50:59,023 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T09:50:59,023 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T09:50:59,023 INFO [RS:1;3e788d7781dc:46013 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T09:50:59,023 DEBUG [RS:0;3e788d7781dc:37345 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:59,023 INFO [RS:2;3e788d7781dc:44149 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T09:50:59,023 INFO [RS:1;3e788d7781dc:46013 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3e788d7781dc:46013. 2024-12-11T09:50:59,023 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T09:50:59,023 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T09:50:59,023 INFO [RS:2;3e788d7781dc:44149 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-11T09:50:59,023 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T09:50:59,024 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T09:50:59,024 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(3091): Received CLOSE for ad65097553d5debff68e488fe2df550b 2024-12-11T09:50:59,024 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T09:50:59,023 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T09:50:59,023 DEBUG [RS:1;3e788d7781dc:46013 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T09:50:59,024 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-11T09:50:59,024 DEBUG [RS:1;3e788d7781dc:46013 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:59,024 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.HRegionServer(976): stopping server 3e788d7781dc,46013,1733910656814; all regions closed. 2024-12-11T09:50:59,024 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(959): stopping server 3e788d7781dc,44149,1733910656854 2024-12-11T09:50:59,024 INFO [RS:2;3e788d7781dc:44149 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T09:50:59,024 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T09:50:59,024 INFO [RS:2;3e788d7781dc:44149 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;3e788d7781dc:44149. 
2024-12-11T09:50:59,024 DEBUG [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-11T09:50:59,025 DEBUG [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T09:50:59,025 DEBUG [RS:2;3e788d7781dc:44149 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T09:50:59,025 DEBUG [RS:2;3e788d7781dc:44149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:59,025 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,025 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T09:50:59,025 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,025 DEBUG [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(1325): Online Regions={ad65097553d5debff68e488fe2df550b=TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b.} 2024-12-11T09:50:59,025 DEBUG [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(1351): Waiting on ad65097553d5debff68e488fe2df550b 2024-12-11T09:50:59,025 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T09:50:59,025 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,025 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T09:50:59,025 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,025 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T09:50:59,025 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ad65097553d5debff68e488fe2df550b, disabling compactions & flushes 2024-12-11T09:50:59,025 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,026 INFO [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): 
Closing region TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 2024-12-11T09:50:59,026 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T09:50:59,026 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 2024-12-11T09:50:59,026 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T09:50:59,026 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. after waiting 0 ms 2024-12-11T09:50:59,026 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 2024-12-11T09:50:59,026 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-11T09:50:59,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741833_1009 (size=93) 2024-12-11T09:50:59,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741833_1009 (size=93) 2024-12-11T09:50:59,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741833_1009 (size=93) 2024-12-11T09:50:59,031 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/default/TestHBaseWalOnEC/ad65097553d5debff68e488fe2df550b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T09:50:59,032 DEBUG [RS:1;3e788d7781dc:46013 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/oldWALs 2024-12-11T09:50:59,032 INFO [RS:1;3e788d7781dc:46013 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3e788d7781dc%2C46013%2C1733910656814:(num 1733910657513) 2024-12-11T09:50:59,032 DEBUG [RS:1;3e788d7781dc:46013 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:59,032 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T09:50:59,032 INFO [RS:1;3e788d7781dc:46013 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T09:50:59,032 INFO [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 
2024-12-11T09:50:59,033 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ad65097553d5debff68e488fe2df550b: Waiting for close lock at 1733910659025Running coprocessor pre-close hooks at 1733910659025Disabling compacts and flushes for region at 1733910659025Disabling writes for close at 1733910659026 (+1 ms)Writing region close event to WAL at 1733910659027 (+1 ms)Running coprocessor post-close hooks at 1733910659032 (+5 ms)Closed at 1733910659032 2024-12-11T09:50:59,033 INFO [RS:1;3e788d7781dc:46013 {}] hbase.ChoreService(370): Chore service for: regionserver/3e788d7781dc:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T09:50:59,033 DEBUG [RS_CLOSE_REGION-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b. 2024-12-11T09:50:59,033 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T09:50:59,033 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T09:50:59,033 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T09:50:59,033 INFO [RS:1;3e788d7781dc:46013 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T09:50:59,033 INFO [RS:1;3e788d7781dc:46013 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46013 2024-12-11T09:50:59,033 INFO [regionserver/3e788d7781dc:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T09:50:59,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3e788d7781dc,46013,1733910656814 2024-12-11T09:50:59,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T09:50:59,042 INFO [RS:1;3e788d7781dc:46013 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T09:50:59,044 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/.tmp/info/5d4b50a6fbb04c89aad3c82a12f0cd21 is 153, key is TestHBaseWalOnEC,,1733910657993.ad65097553d5debff68e488fe2df550b./info:regioninfo/1733910658383/Put/seqid=0 2024-12-11T09:50:59,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741840_1016 (size=6637) 2024-12-11T09:50:59,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741840_1016 (size=6637) 2024-12-11T09:50:59,051 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3e788d7781dc,46013,1733910656814] 2024-12-11T09:50:59,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741840_1016 (size=6637) 2024-12-11T09:50:59,054 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/.tmp/info/5d4b50a6fbb04c89aad3c82a12f0cd21 2024-12-11T09:50:59,059 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3e788d7781dc,46013,1733910656814 already deleted, retry=false 2024-12-11T09:50:59,059 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3e788d7781dc,46013,1733910656814 expired; onlineServers=2 2024-12-11T09:50:59,077 INFO [regionserver/3e788d7781dc:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T09:50:59,077 INFO [regionserver/3e788d7781dc:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T09:50:59,079 INFO [regionserver/3e788d7781dc:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T09:50:59,080 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/.tmp/ns/aac0c046748f49cf872dbdcfb6c92c82 is 43, key is default/ns:d/1733910657953/Put/seqid=0 2024-12-11T09:50:59,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741841_1017 (size=5153) 2024-12-11T09:50:59,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to 
blk_1073741841_1017 (size=5153) 2024-12-11T09:50:59,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741841_1017 (size=5153) 2024-12-11T09:50:59,087 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/.tmp/ns/aac0c046748f49cf872dbdcfb6c92c82 2024-12-11T09:50:59,110 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/.tmp/table/4b3485de80eb407d8f36e2ea6c3dbb2d is 52, key is TestHBaseWalOnEC/table:state/1733910658398/Put/seqid=0 2024-12-11T09:50:59,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741842_1018 (size=5249) 2024-12-11T09:50:59,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741842_1018 (size=5249) 2024-12-11T09:50:59,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741842_1018 (size=5249) 2024-12-11T09:50:59,118 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/.tmp/table/4b3485de80eb407d8f36e2ea6c3dbb2d 2024-12-11T09:50:59,127 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/.tmp/info/5d4b50a6fbb04c89aad3c82a12f0cd21 as hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/info/5d4b50a6fbb04c89aad3c82a12f0cd21 2024-12-11T09:50:59,136 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/info/5d4b50a6fbb04c89aad3c82a12f0cd21, entries=10, sequenceid=11, filesize=6.5 K 2024-12-11T09:50:59,137 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/.tmp/ns/aac0c046748f49cf872dbdcfb6c92c82 as hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/ns/aac0c046748f49cf872dbdcfb6c92c82 2024-12-11T09:50:59,145 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/ns/aac0c046748f49cf872dbdcfb6c92c82, entries=2, sequenceid=11, filesize=5.0 K 2024-12-11T09:50:59,146 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/.tmp/table/4b3485de80eb407d8f36e2ea6c3dbb2d as hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/table/4b3485de80eb407d8f36e2ea6c3dbb2d 2024-12-11T09:50:59,151 INFO [RS:1;3e788d7781dc:46013 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T09:50:59,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:59,151 INFO [RS:1;3e788d7781dc:46013 {}] regionserver.HRegionServer(1031): Exiting; stopping=3e788d7781dc,46013,1733910656814; zookeeper connection closed. 2024-12-11T09:50:59,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46013-0x100148012df0002, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:59,151 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1ccd9cd9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1ccd9cd9 2024-12-11T09:50:59,154 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/table/4b3485de80eb407d8f36e2ea6c3dbb2d, entries=2, sequenceid=11, filesize=5.1 K 2024-12-11T09:50:59,155 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 129ms, sequenceid=11, compaction requested=false 2024-12-11T09:50:59,161 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-11T09:50:59,161 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T09:50:59,162 INFO [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T09:50:59,162 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733910659025Running coprocessor pre-close hooks at 1733910659025Disabling compacts and flushes for region at 1733910659025Disabling writes for close at 1733910659026 (+1 ms)Obtaining lock to block concurrent updates at 1733910659026Preparing flush snapshotting stores in 1588230740 at 1733910659026Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733910659027 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733910659028 (+1 ms)Flushing 1588230740/info: creating writer at 1733910659029 (+1 ms)Flushing 1588230740/info: appending metadata at 1733910659044 (+15 ms)Flushing 1588230740/info: 
closing flushed file at 1733910659044Flushing 1588230740/ns: creating writer at 1733910659062 (+18 ms)Flushing 1588230740/ns: appending metadata at 1733910659079 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733910659079Flushing 1588230740/table: creating writer at 1733910659094 (+15 ms)Flushing 1588230740/table: appending metadata at 1733910659110 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733910659110Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7df24fd1: reopening flushed file at 1733910659125 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11c26f5f: reopening flushed file at 1733910659136 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b5298c6: reopening flushed file at 1733910659145 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 129ms, sequenceid=11, compaction requested=false at 1733910659155 (+10 ms)Writing region close event to WAL at 1733910659157 (+2 ms)Running coprocessor post-close hooks at 1733910659161 (+4 ms)Closed at 1733910659161 2024-12-11T09:50:59,162 DEBUG [RS_CLOSE_META-regionserver/3e788d7781dc:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-11T09:50:59,225 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(976): stopping server 3e788d7781dc,37345,1733910656773; all regions closed. 2024-12-11T09:50:59,225 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(976): stopping server 3e788d7781dc,44149,1733910656854; all regions closed. 2024-12-11T09:50:59,225 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,226 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,226 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,226 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,226 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,226 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,226 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,226 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,226 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,226 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741835_1011 (size=1298) 2024-12-11T09:50:59,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741836_1012 (size=2751) 2024-12-11T09:50:59,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741836_1012 (size=2751) 2024-12-11T09:50:59,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741835_1011 (size=1298) 2024-12-11T09:50:59,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741836_1012 (size=2751) 2024-12-11T09:50:59,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741835_1011 (size=1298) 2024-12-11T09:50:59,232 DEBUG [RS:0;3e788d7781dc:37345 {}] 
wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/oldWALs 2024-12-11T09:50:59,232 DEBUG [RS:2;3e788d7781dc:44149 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/oldWALs 2024-12-11T09:50:59,232 INFO [RS:0;3e788d7781dc:37345 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3e788d7781dc%2C37345%2C1733910656773.meta:.meta(num 1733910657859) 2024-12-11T09:50:59,232 INFO [RS:2;3e788d7781dc:44149 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3e788d7781dc%2C44149%2C1733910656854:(num 1733910657514) 2024-12-11T09:50:59,233 DEBUG [RS:2;3e788d7781dc:44149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:59,233 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T09:50:59,233 INFO [RS:2;3e788d7781dc:44149 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T09:50:59,233 INFO [RS:2;3e788d7781dc:44149 {}] hbase.ChoreService(370): Chore service for: regionserver/3e788d7781dc:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T09:50:59,233 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,233 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T09:50:59,233 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,233 INFO [regionserver/3e788d7781dc:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T09:50:59,233 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T09:50:59,233 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
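The meta-region flush recorded a few entries above writes each HFile under the region's `.tmp/` directory and then "Committing ... as ..." moves it into the matching store directory (`info/`, `ns/`, `table/`). That is the usual write-then-rename pattern on HDFS; the sketch below is only a generic illustration of that pattern, not HBase's actual `HRegionFileSystem` code, and the paths, file name, and payload are made up.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRename {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical paths; in the log above the equivalents live under
    // .../data/hbase/meta/1588230740/.tmp and .../1588230740/info.
    Path tmp = new Path("/demo/region/.tmp/flush-0001");
    Path committed = new Path("/demo/region/info/flush-0001");

    // 1. Write the new file somewhere readers never look.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("example flushed data".getBytes(StandardCharsets.UTF_8));
    }

    // 2. Commit it with a rename; an HDFS rename within the namespace is atomic,
    //    so readers see either no file or the complete file, never a partial one.
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmp, committed)) {
      throw new IOException("commit failed for " + tmp);
    }
  }
}
```

The single DEBUG "Committing ..." line per store in the log appears to correspond to that rename step, which is why the flushed data becomes visible to readers in one step after the slower write has already finished.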
2024-12-11T09:50:59,233 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,233 INFO [RS:2;3e788d7781dc:44149 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T09:50:59,233 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,233 INFO [RS:2;3e788d7781dc:44149 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44149 2024-12-11T09:50:59,233 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741834_1010 (size=93) 2024-12-11T09:50:59,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741834_1010 (size=93) 2024-12-11T09:50:59,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741834_1010 (size=93) 2024-12-11T09:50:59,239 DEBUG [RS:0;3e788d7781dc:37345 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/oldWALs 2024-12-11T09:50:59,239 INFO [RS:0;3e788d7781dc:37345 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3e788d7781dc%2C37345%2C1733910656773:(num 1733910657513) 2024-12-11T09:50:59,239 DEBUG [RS:0;3e788d7781dc:37345 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T09:50:59,239 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T09:50:59,239 INFO [RS:0;3e788d7781dc:37345 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T09:50:59,239 INFO [RS:0;3e788d7781dc:37345 {}] hbase.ChoreService(370): Chore service for: regionserver/3e788d7781dc:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T09:50:59,239 INFO [RS:0;3e788d7781dc:37345 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T09:50:59,239 INFO [regionserver/3e788d7781dc:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T09:50:59,239 INFO [RS:0;3e788d7781dc:37345 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37345 2024-12-11T09:50:59,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T09:50:59,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3e788d7781dc,44149,1733910656854 2024-12-11T09:50:59,242 INFO [RS:2;3e788d7781dc:44149 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T09:50:59,251 INFO [RS:0;3e788d7781dc:37345 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T09:50:59,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3e788d7781dc,37345,1733910656773 2024-12-11T09:50:59,251 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3e788d7781dc,37345,1733910656773] 2024-12-11T09:50:59,267 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3e788d7781dc,37345,1733910656773 already deleted, retry=false 2024-12-11T09:50:59,268 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3e788d7781dc,37345,1733910656773 expired; onlineServers=1 2024-12-11T09:50:59,268 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3e788d7781dc,44149,1733910656854] 2024-12-11T09:50:59,276 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3e788d7781dc,44149,1733910656854 already deleted, retry=false 2024-12-11T09:50:59,276 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3e788d7781dc,44149,1733910656854 expired; onlineServers=0 2024-12-11T09:50:59,276 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3e788d7781dc,37097,1733910656630' ***** 2024-12-11T09:50:59,276 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T09:50:59,276 INFO [M:0;3e788d7781dc:37097 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T09:50:59,276 INFO [M:0;3e788d7781dc:37097 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T09:50:59,276 DEBUG [M:0;3e788d7781dc:37097 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-11T09:50:59,276 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
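The `NodeDeleted` events on `/hbase/rs/...` followed by "RegionServer ephemeral node deleted, processing expiration" reflect the standard ZooKeeper ephemeral-node liveness pattern: each region server registers an ephemeral znode, and when its session ends the znode vanishes and any watcher on the parent is notified. The sketch below shows that general pattern with the plain ZooKeeper client; it is not HBase's actual `RegionServerTracker`, and the connect string, paths, and timeout are placeholders.

```java
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralLiveness {
  public static void main(String[] args) throws Exception {
    // Placeholder quorum address; the test above uses 127.0.0.1:63562.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000,
        (WatchedEvent e) -> System.out.println("session event: " + e));

    // A "server" announces itself with an ephemeral znode
    // (assumes the parent path /demo/rs already exists).
    zk.create("/demo/rs/server-1", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // A "tracker" watches the parent; NodeChildrenChanged fires when a member
    // joins, or when its session dies and the ephemeral node is removed.
    Watcher tracker = e -> System.out.println("membership changed: " + e.getPath());
    List<String> live = zk.getChildren("/demo/rs", tracker);
    System.out.println("live servers: " + live);
  }
}
```

In the log, the master's tracker reacts to exactly this kind of child-list change on `/hbase/rs`, marking each departed region server as expired and decrementing `onlineServers`.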
2024-12-11T09:50:59,276 DEBUG [M:0;3e788d7781dc:37097 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T09:50:59,276 DEBUG [master/3e788d7781dc:0:becomeActiveMaster-HFileCleaner.large.0-1733910657204 {}] cleaner.HFileCleaner(306): Exit Thread[master/3e788d7781dc:0:becomeActiveMaster-HFileCleaner.large.0-1733910657204,5,FailOnTimeoutGroup] 2024-12-11T09:50:59,276 DEBUG [master/3e788d7781dc:0:becomeActiveMaster-HFileCleaner.small.0-1733910657204 {}] cleaner.HFileCleaner(306): Exit Thread[master/3e788d7781dc:0:becomeActiveMaster-HFileCleaner.small.0-1733910657204,5,FailOnTimeoutGroup] 2024-12-11T09:50:59,277 INFO [M:0;3e788d7781dc:37097 {}] hbase.ChoreService(370): Chore service for: master/3e788d7781dc:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-11T09:50:59,277 INFO [M:0;3e788d7781dc:37097 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T09:50:59,277 DEBUG [M:0;3e788d7781dc:37097 {}] master.HMaster(1795): Stopping service threads 2024-12-11T09:50:59,277 INFO [M:0;3e788d7781dc:37097 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T09:50:59,277 INFO [M:0;3e788d7781dc:37097 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T09:50:59,277 INFO [M:0;3e788d7781dc:37097 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T09:50:59,277 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-11T09:50:59,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T09:50:59,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T09:50:59,284 DEBUG [M:0;3e788d7781dc:37097 {}] zookeeper.ZKUtil(347): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-11T09:50:59,284 WARN [M:0;3e788d7781dc:37097 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-11T09:50:59,285 INFO [M:0;3e788d7781dc:37097 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/.lastflushedseqids 2024-12-11T09:50:59,288 WARN [IPC Server handler 0 on default port 42845 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T09:50:59,288 WARN [IPC Server handler 0 on default port 42845 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], 
creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T09:50:59,288 WARN [IPC Server handler 0 on default port 42845 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T09:50:59,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741843_1019 (size=127) 2024-12-11T09:50:59,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741843_1019 (size=127) 2024-12-11T09:50:59,294 INFO [M:0;3e788d7781dc:37097 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-11T09:50:59,294 INFO [M:0;3e788d7781dc:37097 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-11T09:50:59,294 DEBUG [M:0;3e788d7781dc:37097 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T09:50:59,294 INFO [M:0;3e788d7781dc:37097 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T09:50:59,294 DEBUG [M:0;3e788d7781dc:37097 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T09:50:59,294 DEBUG [M:0;3e788d7781dc:37097 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T09:50:59,294 DEBUG [M:0;3e788d7781dc:37097 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-11T09:50:59,294 INFO [M:0;3e788d7781dc:37097 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-11T09:50:59,310 DEBUG [M:0;3e788d7781dc:37097 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d3cecffd39444cd1907f25cdcb31ae0a is 82, key is hbase:meta,,1/info:regioninfo/1733910657894/Put/seqid=0 2024-12-11T09:50:59,312 WARN [IPC Server handler 3 on default port 42845 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T09:50:59,312 WARN [IPC Server handler 3 on default port 42845 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T09:50:59,312 WARN [IPC Server handler 3 on default port 42845 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T09:50:59,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741844_1020 (size=5672) 2024-12-11T09:50:59,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741844_1020 (size=5672) 2024-12-11T09:50:59,316 INFO [M:0;3e788d7781dc:37097 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d3cecffd39444cd1907f25cdcb31ae0a 2024-12-11T09:50:59,338 DEBUG [M:0;3e788d7781dc:37097 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b62df871af324af98a12584314721304 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733910658405/Put/seqid=0 2024-12-11T09:50:59,339 WARN [IPC Server handler 4 on default port 42845 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and 
org.apache.hadoop.net.NetworkTopology 2024-12-11T09:50:59,339 WARN [IPC Server handler 4 on default port 42845 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T09:50:59,339 WARN [IPC Server handler 4 on default port 42845 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T09:50:59,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741845_1021 (size=6438) 2024-12-11T09:50:59,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741845_1021 (size=6438) 2024-12-11T09:50:59,344 INFO [M:0;3e788d7781dc:37097 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b62df871af324af98a12584314721304 2024-12-11T09:50:59,359 INFO [RS:2;3e788d7781dc:44149 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T09:50:59,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:59,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44149-0x100148012df0003, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:59,359 INFO [RS:2;3e788d7781dc:44149 {}] regionserver.HRegionServer(1031): Exiting; stopping=3e788d7781dc,44149,1733910656854; zookeeper connection closed. 
2024-12-11T09:50:59,360 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@360168c5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@360168c5 2024-12-11T09:50:59,364 DEBUG [M:0;3e788d7781dc:37097 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/875f36fab54e4df5ab2a822c98472a9c is 69, key is 3e788d7781dc,37345,1733910656773/rs:state/1733910657329/Put/seqid=0 2024-12-11T09:50:59,365 WARN [IPC Server handler 3 on default port 42845 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T09:50:59,365 WARN [IPC Server handler 3 on default port 42845 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T09:50:59,365 WARN [IPC Server handler 3 on default port 42845 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T09:50:59,368 INFO [RS:0;3e788d7781dc:37345 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T09:50:59,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:59,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37345-0x100148012df0001, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:59,368 INFO [RS:0;3e788d7781dc:37345 {}] regionserver.HRegionServer(1031): Exiting; stopping=3e788d7781dc,37345,1733910656773; zookeeper connection closed. 
2024-12-11T09:50:59,368 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@62e249d6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@62e249d6 2024-12-11T09:50:59,368 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-11T09:50:59,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741846_1022 (size=5294) 2024-12-11T09:50:59,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741846_1022 (size=5294) 2024-12-11T09:50:59,372 INFO [M:0;3e788d7781dc:37097 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/875f36fab54e4df5ab2a822c98472a9c 2024-12-11T09:50:59,379 DEBUG [M:0;3e788d7781dc:37097 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d3cecffd39444cd1907f25cdcb31ae0a as hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d3cecffd39444cd1907f25cdcb31ae0a 2024-12-11T09:50:59,387 INFO [M:0;3e788d7781dc:37097 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d3cecffd39444cd1907f25cdcb31ae0a, entries=8, sequenceid=72, filesize=5.5 K 2024-12-11T09:50:59,388 DEBUG [M:0;3e788d7781dc:37097 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b62df871af324af98a12584314721304 as hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b62df871af324af98a12584314721304 2024-12-11T09:50:59,394 INFO [M:0;3e788d7781dc:37097 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b62df871af324af98a12584314721304, entries=8, sequenceid=72, filesize=6.3 K 2024-12-11T09:50:59,396 DEBUG [M:0;3e788d7781dc:37097 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/875f36fab54e4df5ab2a822c98472a9c as hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/875f36fab54e4df5ab2a822c98472a9c 2024-12-11T09:50:59,402 INFO [M:0;3e788d7781dc:37097 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42845/user/jenkins/test-data/2e5a5d3c-7e86-1c89-ce63-26e52d995414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/875f36fab54e4df5ab2a822c98472a9c, entries=3, sequenceid=72, filesize=5.2 K 2024-12-11T09:50:59,403 INFO [M:0;3e788d7781dc:37097 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 109ms, sequenceid=72, compaction requested=false 2024-12-11T09:50:59,404 INFO [M:0;3e788d7781dc:37097 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T09:50:59,404 DEBUG [M:0;3e788d7781dc:37097 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733910659294Disabling compacts and flushes for region at 1733910659294Disabling writes for close at 1733910659294Obtaining lock to block concurrent updates at 1733910659294Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733910659294Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733910659295 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733910659296 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733910659296Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733910659310 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733910659310Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733910659323 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733910659337 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733910659337Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733910659350 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733910659364 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733910659364Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@112a5bfd: reopening flushed file at 1733910659378 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5db10b6c: reopening flushed file at 1733910659387 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44cced9c: reopening flushed file at 1733910659394 (+7 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 109ms, sequenceid=72, compaction requested=false at 1733910659403 (+9 ms)Writing region close event to WAL at 1733910659404 (+1 ms)Closed at 1733910659404 2024-12-11T09:50:59,405 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,405 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,405 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,405 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,405 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T09:50:59,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33295 is added to blk_1073741830_1006 (size=32662) 2024-12-11T09:50:59,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741830_1006 (size=32662) 2024-12-11T09:50:59,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37501 is added to blk_1073741830_1006 (size=32662) 2024-12-11T09:50:59,408 INFO [master:store-WAL-Roller {}] 
wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T09:50:59,408 INFO [M:0;3e788d7781dc:37097 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-11T09:50:59,408 INFO [M:0;3e788d7781dc:37097 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37097 2024-12-11T09:50:59,409 INFO [M:0;3e788d7781dc:37097 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T09:50:59,518 INFO [M:0;3e788d7781dc:37097 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T09:50:59,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:59,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37097-0x100148012df0000, quorum=127.0.0.1:63562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T09:50:59,521 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52ded98a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T09:50:59,521 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7bed397a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T09:50:59,521 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T09:50:59,521 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@287e3901{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T09:50:59,522 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56fa1103{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/hadoop.log.dir/,STOPPED} 2024-12-11T09:50:59,523 WARN [BP-1694945727-172.17.0.2-1733910654907 heartbeating to localhost/127.0.0.1:42845 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T09:50:59,523 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T09:50:59,523 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T09:50:59,523 WARN [BP-1694945727-172.17.0.2-1733910654907 heartbeating to localhost/127.0.0.1:42845 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1694945727-172.17.0.2-1733910654907 (Datanode Uuid cd262694-4559-4c7d-99d4-f7637c740e15) service to localhost/127.0.0.1:42845 2024-12-11T09:50:59,524 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/data/data5/current/BP-1694945727-172.17.0.2-1733910654907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T09:50:59,525 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/data/data6/current/BP-1694945727-172.17.0.2-1733910654907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T09:50:59,525 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T09:50:59,527 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@51a3a305{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T09:50:59,528 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@196fd311{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T09:50:59,528 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T09:50:59,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@867097b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T09:50:59,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@136685e2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/hadoop.log.dir/,STOPPED} 2024-12-11T09:50:59,530 WARN [BP-1694945727-172.17.0.2-1733910654907 heartbeating to localhost/127.0.0.1:42845 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T09:50:59,530 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T09:50:59,530 WARN [BP-1694945727-172.17.0.2-1733910654907 heartbeating to localhost/127.0.0.1:42845 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1694945727-172.17.0.2-1733910654907 (Datanode Uuid 5b35181d-f92d-4a50-a88b-bad24ee9a81e) service to localhost/127.0.0.1:42845 2024-12-11T09:50:59,530 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T09:50:59,531 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/data/data3/current/BP-1694945727-172.17.0.2-1733910654907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T09:50:59,531 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/data/data4/current/BP-1694945727-172.17.0.2-1733910654907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T09:50:59,531 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T09:50:59,534 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f0ad577{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T09:50:59,534 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74f05853{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T09:50:59,534 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T09:50:59,534 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@796906a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T09:50:59,534 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25acd767{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/hadoop.log.dir/,STOPPED} 2024-12-11T09:50:59,535 WARN [BP-1694945727-172.17.0.2-1733910654907 heartbeating to localhost/127.0.0.1:42845 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T09:50:59,535 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T09:50:59,535 WARN [BP-1694945727-172.17.0.2-1733910654907 heartbeating to localhost/127.0.0.1:42845 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1694945727-172.17.0.2-1733910654907 (Datanode Uuid 24d472da-0c6b-4dba-adef-a5361d44a88b) service to localhost/127.0.0.1:42845 2024-12-11T09:50:59,535 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T09:50:59,536 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/data/data1/current/BP-1694945727-172.17.0.2-1733910654907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T09:50:59,536 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/cluster_4fb5000e-a59a-ec64-7f7d-11c627dc53d6/data/data2/current/BP-1694945727-172.17.0.2-1733910654907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T09:50:59,536 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T09:50:59,540 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@8948308{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T09:50:59,541 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3504f92f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T09:50:59,541 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T09:50:59,541 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5973d122{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T09:50:59,541 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ee3f473{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/51500e69-9ef2-6db9-00c6-95f89a13a064/hadoop.log.dir/,STOPPED} 2024-12-11T09:50:59,547 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-11T09:50:59,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-11T09:50:59,577 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=153 (was 91) - Thread LEAK? -, OpenFileDescriptor=521 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=244 (was 248), ProcessCount=11 (was 11), AvailableMemoryMB=7948 (was 8153)
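The tail of the log ("Shutdown of 1 master(s) and 3 regionserver(s) complete", "Minicluster is down", and the ResourceChecker thread/file-descriptor comparison) is the teardown half of a standard HBase mini-cluster test such as `TestHBaseWalOnEC#testReadWrite`. The sketch below shows the general shape of such a test using the HBase testing utility; it is not the actual test source, and the table name usage, column family, and the exact utility method signatures (assumed to follow the long-standing `HBaseTestingUtility` API) are assumptions.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterReadWriteSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Starts an in-process master, 3 region servers, ZooKeeper and HDFS,
    // matching the "1 master(s) and 3 regionserver(s)" torn down in the log.
    util.startMiniCluster(3);
    try {
      TableName name = TableName.valueOf("TestHBaseWalOnEC"); // table name taken from the log
      Table table = util.createTable(name, Bytes.toBytes("cf")); // family is assumed
      table.put(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      byte[] value = table.get(new Get(Bytes.toBytes("row1")))
          .getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      assert Bytes.equals(value, Bytes.toBytes("v"));
    } finally {
      // Produces the shutdown cascade recorded above: regions close and flush,
      // WALs move to oldWALs, the master stops, then ZooKeeper and the DataNodes stop.
      util.shutdownMiniCluster();
    }
  }
}
```

The ResourceChecker line at the very end compares thread and file-descriptor counts before and after the test (153 threads vs. 91, 521 open descriptors vs. 439) and flags the growth as a possible leak, which is why the log ends with those "LEAK?" annotations rather than a pass/fail verdict.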