2024-11-19 12:44:48,075 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-11-19 12:44:48,088 main DEBUG Took 0.011258 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-19 12:44:48,088 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-19 12:44:48,089 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-19 12:44:48,090 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-19 12:44:48,091 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-19 12:44:48,101 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-19 12:44:48,119 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,121 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-19 12:44:48,121 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,122 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-19 12:44:48,122 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,122 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-19 12:44:48,123 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,124 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-19 12:44:48,124 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,124 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-19 12:44:48,125 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,126 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-19 12:44:48,127 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,127 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-19 12:44:48,128 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,128 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-19 12:44:48,129 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,130 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-19 12:44:48,130 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,131 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-19 12:44:48,131 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,132 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-19 12:44:48,132 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,133 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-19 12:44:48,134 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,134 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-19 12:44:48,136 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-19 12:44:48,138 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-19 12:44:48,139 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-19 12:44:48,140 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-19 12:44:48,141 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-19 12:44:48,142 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-19 12:44:48,151 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-19 12:44:48,154 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-19 12:44:48,156 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-19 12:44:48,156 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-19 12:44:48,157 main DEBUG createAppenders(={Console})
2024-11-19 12:44:48,158 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized
2024-11-19 12:44:48,158 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-11-19 12:44:48,159 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK.
2024-11-19 12:44:48,160 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-19 12:44:48,160 main DEBUG OutputStream closed
2024-11-19 12:44:48,161 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-19 12:44:48,161 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-19 12:44:48,161 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK
2024-11-19 12:44:48,259 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-19 12:44:48,263 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-19 12:44:48,264 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-19 12:44:48,266 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-19 12:44:48,267 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-19 12:44:48,267 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-19 12:44:48,268 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-19 12:44:48,269 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-19 12:44:48,269 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-19 12:44:48,270 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-19 12:44:48,270 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-19 12:44:48,271 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-19 12:44:48,272 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-19 12:44:48,273 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-19 12:44:48,273 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-19 12:44:48,274 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-19 12:44:48,274 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-19 12:44:48,275 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-19 12:44:48,279 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-19 12:44:48,288 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-11-19 12:44:48,289 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-19 12:44:48,290 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-11-19T12:44:48,317 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-11-19 12:44:48,321 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-19 12:44:48,321 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
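[Editor's note] The status output above records a properties-driven configuration (the log4j2.properties inside the hbase-logging tests jar) building a PatternLayout, an appender named "Console" backed by the HBase-specific HBaseTestAppender targeting SYSTEM_ERR, a set of per-package logger levels, and a root logger at INFO. A minimal sketch of an equivalent setup via Log4j2's ConfigurationBuilder API is shown below; it is an assumption for illustration only (the test itself loads the properties file, and the stock Console appender stands in for HBaseTestAppender).

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public final class TestLoggingConfigSketch {
  /** Builds a configuration roughly matching the one logged above and installs it. */
  public static LoggerContext configure() {
    ConfigurationBuilder<BuiltConfiguration> b = ConfigurationBuilderFactory.newConfigurationBuilder();
    // Stock Console appender used here as a stand-in for org.apache.hadoop.hbase.logging.HBaseTestAppender.
    b.add(b.newAppender("Console", "Console")
        .addAttribute("target", "SYSTEM_ERR")
        .add(b.newLayout("PatternLayout")
            .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")));
    // A few of the per-package levels seen in the LoggerConfig$Builder lines above.
    b.add(b.newLogger("org.apache.zookeeper", Level.ERROR));
    b.add(b.newLogger("org.apache.hadoop", Level.WARN));
    b.add(b.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    // Root logger INFO -> Console, matching levelAndRefs="INFO,Console".
    b.add(b.newRootLogger(Level.INFO).add(b.newAppenderRef("Console")));
    return Configurator.initialize(b.build());
  }
}
```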
2024-11-19T12:44:48,602 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004
2024-11-19T12:44:48,629 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e, deleteOnExit=true
2024-11-19T12:44:48,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/test.cache.data in system properties and HBase conf
2024-11-19T12:44:48,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/hadoop.tmp.dir in system properties and HBase conf
2024-11-19T12:44:48,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/hadoop.log.dir in system properties and HBase conf
2024-11-19T12:44:48,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-19T12:44:48,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-19T12:44:48,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-19T12:44:48,722 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-19T12:44:48,811 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-19T12:44:48,816 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-19T12:44:48,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-19T12:44:48,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-19T12:44:48,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-19T12:44:48,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-19T12:44:48,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-19T12:44:48,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-19T12:44:48,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-19T12:44:48,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-19T12:44:48,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/nfs.dump.dir in system properties and HBase conf
2024-11-19T12:44:48,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/java.io.tmpdir in system properties and HBase conf
2024-11-19T12:44:48,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-19T12:44:48,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-19T12:44:48,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-19T12:44:49,982 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-19T12:44:50,070 INFO [Time-limited test {}] log.Log(170): Logging initialized @2741ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-19T12:44:50,166 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-19T12:44:50,246 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-19T12:44:50,279 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-19T12:44:50,279 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-19T12:44:50,281 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-19T12:44:50,297 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-19T12:44:50,300 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/hadoop.log.dir/,AVAILABLE}
2024-11-19T12:44:50,300 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-19T12:44:50,526 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62d6efd9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/java.io.tmpdir/jetty-localhost-45579-hadoop-hdfs-3_4_1-tests_jar-_-any-5982313040390725277/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-19T12:44:50,535 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:45579}
2024-11-19T12:44:50,536 INFO [Time-limited test {}] server.Server(415): Started @3207ms
2024-11-19T12:44:51,142 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-19T12:44:51,161 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-19T12:44:51,162 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-19T12:44:51,162 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-19T12:44:51,162 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-19T12:44:51,164 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3665148e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/hadoop.log.dir/,AVAILABLE}
2024-11-19T12:44:51,164 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b3a0659{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-19T12:44:51,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b23cf15{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/java.io.tmpdir/jetty-localhost-42859-hadoop-hdfs-3_4_1-tests_jar-_-any-6280600792550921151/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-19T12:44:51,293 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f9e5902{HTTP/1.1, (http/1.1)}{localhost:42859}
2024-11-19T12:44:51,293 INFO [Time-limited test {}] server.Server(415): Started @3964ms
2024-11-19T12:44:51,361 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-19T12:44:51,529 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-19T12:44:51,536 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-19T12:44:51,543 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-19T12:44:51,543 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-19T12:44:51,544 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-19T12:44:51,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5435fd88{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/hadoop.log.dir/,AVAILABLE}
2024-11-19T12:44:51,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65cd6e19{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-19T12:44:51,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14402056{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/java.io.tmpdir/jetty-localhost-40597-hadoop-hdfs-3_4_1-tests_jar-_-any-11201500144093994623/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-19T12:44:51,664 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e1cb3ec{HTTP/1.1, (http/1.1)}{localhost:40597}
2024-11-19T12:44:51,665 INFO [Time-limited test {}] server.Server(415): Started @4336ms
2024-11-19T12:44:51,667 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-19T12:44:51,709 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-19T12:44:51,714 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-19T12:44:51,719 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-19T12:44:51,719 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-19T12:44:51,720 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-19T12:44:51,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@233bb3ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/hadoop.log.dir/,AVAILABLE}
2024-11-19T12:44:51,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@146c020c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-19T12:44:51,850 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2a9ecb50{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/java.io.tmpdir/jetty-localhost-39983-hadoop-hdfs-3_4_1-tests_jar-_-any-1172210946144102929/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-19T12:44:51,851 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4067fd8a{HTTP/1.1, (http/1.1)}{localhost:39983}
2024-11-19T12:44:51,851 INFO [Time-limited test {}] server.Server(415): Started @4523ms
2024-11-19T12:44:51,853 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
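[Editor's note] The "hdfs" web app and the three "datanode" web apps started above are the HTTP endpoints of an HDFS mini cluster coming up for the test. The log does not show how the cluster is wired up; the sketch below is only an assumption of how a comparable three-datanode mini DFS cluster is usually started in Hadoop-based tests, using the standard MiniDFSCluster builder from the hadoop-hdfs test artifact.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public final class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Three datanodes, matching the three "datanode" web contexts started above.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      System.out.println("NameNode up at " + fs.getUri());
      fs.mkdirs(new Path("/smoke-test"));  // trivial sanity check against the mini cluster
    } finally {
      cluster.shutdown();
    }
  }
}
```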
2024-11-19T12:44:52,778 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/data/data4/current/BP-1627058744-172.17.0.2-1732020289496/current, will proceed with Du for space computation calculation,
2024-11-19T12:44:52,778 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/data/data3/current/BP-1627058744-172.17.0.2-1732020289496/current, will proceed with Du for space computation calculation,
2024-11-19T12:44:52,778 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/data/data2/current/BP-1627058744-172.17.0.2-1732020289496/current, will proceed with Du for space computation calculation,
2024-11-19T12:44:52,778 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/data/data1/current/BP-1627058744-172.17.0.2-1732020289496/current, will proceed with Du for space computation calculation,
2024-11-19T12:44:52,813 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-19T12:44:52,813 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-19T12:44:52,824 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/data/data5/current/BP-1627058744-172.17.0.2-1732020289496/current, will proceed with Du for space computation calculation,
2024-11-19T12:44:52,824 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/data/data6/current/BP-1627058744-172.17.0.2-1732020289496/current, will proceed with Du for space computation calculation,
2024-11-19T12:44:52,856 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-19T12:44:52,869 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaff754980e17560f with lease ID 0x7c6f8fdcada6e125: Processing first storage report for DS-43572478-5164-419f-a9eb-305ca567bf8f from datanode DatanodeRegistration(127.0.0.1:44341, datanodeUuid=44509b0b-7f0c-4d2e-8f17-d0d0a3f1db0b, infoPort=45439, infoSecurePort=0, ipcPort=43839, storageInfo=lv=-57;cid=testClusterID;nsid=938823620;c=1732020289496)
2024-11-19T12:44:52,871 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaff754980e17560f with lease ID 0x7c6f8fdcada6e125: from storage DS-43572478-5164-419f-a9eb-305ca567bf8f node DatanodeRegistration(127.0.0.1:44341, datanodeUuid=44509b0b-7f0c-4d2e-8f17-d0d0a3f1db0b, infoPort=45439, infoSecurePort=0, ipcPort=43839, storageInfo=lv=-57;cid=testClusterID;nsid=938823620;c=1732020289496), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-19T12:44:52,872 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5f26c9494043220a with lease ID 0x7c6f8fdcada6e126: Processing first storage report for DS-7d1377b4-6a4e-4066-8877-0991cc7bfade from datanode DatanodeRegistration(127.0.0.1:36121, datanodeUuid=bed16c64-0153-4447-a8cf-a6c16b525072, infoPort=42319, infoSecurePort=0, ipcPort=37899, storageInfo=lv=-57;cid=testClusterID;nsid=938823620;c=1732020289496)
2024-11-19T12:44:52,872 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5f26c9494043220a with lease ID 0x7c6f8fdcada6e126: from storage DS-7d1377b4-6a4e-4066-8877-0991cc7bfade node DatanodeRegistration(127.0.0.1:36121, datanodeUuid=bed16c64-0153-4447-a8cf-a6c16b525072, infoPort=42319, infoSecurePort=0, ipcPort=37899, storageInfo=lv=-57;cid=testClusterID;nsid=938823620;c=1732020289496), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-19T12:44:52,873 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe3bbe2f1d14c4e90 with lease ID 0x7c6f8fdcada6e124: Processing first storage report for DS-89d00d59-1d1c-4772-a423-c4ee7d398bc1 from datanode DatanodeRegistration(127.0.0.1:38679, datanodeUuid=df794f08-e502-4164-a31a-dcfc4e2d6194, infoPort=40853, infoSecurePort=0, ipcPort=42641, storageInfo=lv=-57;cid=testClusterID;nsid=938823620;c=1732020289496)
2024-11-19T12:44:52,873 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3bbe2f1d14c4e90 with lease ID 0x7c6f8fdcada6e124: from storage DS-89d00d59-1d1c-4772-a423-c4ee7d398bc1 node DatanodeRegistration(127.0.0.1:38679, datanodeUuid=df794f08-e502-4164-a31a-dcfc4e2d6194, infoPort=40853, infoSecurePort=0, ipcPort=42641, storageInfo=lv=-57;cid=testClusterID;nsid=938823620;c=1732020289496), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-19T12:44:52,873 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaff754980e17560f with lease ID 0x7c6f8fdcada6e125: Processing first storage report for DS-80d5fdf0-a133-4b56-a096-e7104ab3d101 from datanode DatanodeRegistration(127.0.0.1:44341, datanodeUuid=44509b0b-7f0c-4d2e-8f17-d0d0a3f1db0b, infoPort=45439, infoSecurePort=0, ipcPort=43839, storageInfo=lv=-57;cid=testClusterID;nsid=938823620;c=1732020289496)
2024-11-19T12:44:52,874 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaff754980e17560f with lease ID 0x7c6f8fdcada6e125: from storage DS-80d5fdf0-a133-4b56-a096-e7104ab3d101 node DatanodeRegistration(127.0.0.1:44341, datanodeUuid=44509b0b-7f0c-4d2e-8f17-d0d0a3f1db0b, infoPort=45439, infoSecurePort=0, ipcPort=43839, storageInfo=lv=-57;cid=testClusterID;nsid=938823620;c=1732020289496), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-19T12:44:52,874 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5f26c9494043220a with lease ID 0x7c6f8fdcada6e126: Processing first storage report for DS-735909e8-8445-47e4-aedf-67b16e9cf462 from datanode DatanodeRegistration(127.0.0.1:36121, datanodeUuid=bed16c64-0153-4447-a8cf-a6c16b525072, infoPort=42319, infoSecurePort=0, ipcPort=37899, storageInfo=lv=-57;cid=testClusterID;nsid=938823620;c=1732020289496)
2024-11-19T12:44:52,874 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5f26c9494043220a with lease ID 0x7c6f8fdcada6e126: from storage DS-735909e8-8445-47e4-aedf-67b16e9cf462 node DatanodeRegistration(127.0.0.1:36121, datanodeUuid=bed16c64-0153-4447-a8cf-a6c16b525072, infoPort=42319, infoSecurePort=0, ipcPort=37899, storageInfo=lv=-57;cid=testClusterID;nsid=938823620;c=1732020289496), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-19T12:44:52,875 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe3bbe2f1d14c4e90 with lease ID 0x7c6f8fdcada6e124: Processing first storage report for DS-8d533b59-6703-4c72-a9eb-856fd5488420 from datanode DatanodeRegistration(127.0.0.1:38679, datanodeUuid=df794f08-e502-4164-a31a-dcfc4e2d6194, infoPort=40853, infoSecurePort=0, ipcPort=42641, storageInfo=lv=-57;cid=testClusterID;nsid=938823620;c=1732020289496)
2024-11-19T12:44:52,876 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3bbe2f1d14c4e90 with lease ID 0x7c6f8fdcada6e124: from storage DS-8d533b59-6703-4c72-a9eb-856fd5488420 node DatanodeRegistration(127.0.0.1:38679, datanodeUuid=df794f08-e502-4164-a31a-dcfc4e2d6194, infoPort=40853, infoSecurePort=0, ipcPort=42641, storageInfo=lv=-57;cid=testClusterID;nsid=938823620;c=1732020289496), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-19T12:44:52,899 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004
2024-11-19T12:44:52,986 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-11-19T12:44:53,078 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=157, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=313, ProcessCount=11, AvailableMemoryMB=7124
2024-11-19T12:44:53,081 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-19T12:44:53,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-11-19T12:44:53,206 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/zookeeper_0, clientPort=56865, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-19T12:44:53,222 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56865
2024-11-19T12:44:53,253 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T12:44:53,256 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T12:44:53,380 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-19T12:44:53,381 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-19T12:44:53,463 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_165084278_22 at /127.0.0.1:42806 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:44341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42806 dst: /127.0.0.1:44341
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:44:53,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_-9223372036854775792_1002 (size=7)
2024-11-19T12:44:53,898 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-19T12:44:53,908 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b with version=8
2024-11-19T12:44:53,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/hbase-staging
2024-11-19T12:44:54,017 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-19T12:44:54,310 INFO [Time-limited test {}] client.ConnectionUtils(128): master/aba5a916dfea:0 server-side Connection retries=45
2024-11-19T12:44:54,321 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-19T12:44:54,322 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-19T12:44:54,326 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-19T12:44:54,327 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-19T12:44:54,327 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-19T12:44:54,502 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-19T12:44:54,583 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
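[Editor's note] The DFSStripedOutputStream warnings above ("Cannot allocate parity block ... policy=RS-3-2-1024k", "Block group <1> failed to write 2 blocks") are what RS-3-2-1024k produces when only three datanodes are available for a 3+2 stripe. Besides the 'hdfs ec -verifyClusterSetup' command suggested in the log, the same policy handling is reachable from Java through DistributedFileSystem; the sketch below is an illustration only (path names are made up, and enabling the policy is a no-op if it is already enabled).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class EcPolicyCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at the test NameNode, e.g. hdfs://localhost:44033 in the log above.
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Make sure the policy the test relies on is enabled on the NameNode.
      dfs.enableErasureCodingPolicy("RS-3-2-1024k");
      // Apply it to a directory; files written under it become striped (3 data + 2 parity blocks).
      Path dir = new Path("/ec-wals");  // hypothetical directory for illustration
      dfs.mkdirs(dir);
      dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");
      System.out.println("EC policy on " + dir + ": " + dfs.getErasureCodingPolicy(dir));
    }
  }
}
```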
2024-11-19T12:44:54,594 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-19T12:44:54,599 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-19T12:44:54,626 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 14638 (auto-detected)
2024-11-19T12:44:54,627 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-19T12:44:54,651 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40479
2024-11-19T12:44:54,675 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40479 connecting to ZooKeeper ensemble=127.0.0.1:56865
2024-11-19T12:44:54,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:404790x0, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-19T12:44:54,838 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40479-0x1015469e0fb0000 connected
2024-11-19T12:44:54,944 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T12:44:54,948 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T12:44:54,959 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-19T12:44:54,964 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b, hbase.cluster.distributed=false
2024-11-19T12:44:54,993 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-19T12:44:55,000 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40479
2024-11-19T12:44:55,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40479
2024-11-19T12:44:55,006 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40479
2024-11-19T12:44:55,006 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40479
2024-11-19T12:44:55,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40479
2024-11-19T12:44:55,133 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45
2024-11-19T12:44:55,134 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-19T12:44:55,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-19T12:44:55,135 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-19T12:44:55,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-19T12:44:55,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-19T12:44:55,139 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-19T12:44:55,142 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-19T12:44:55,143 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39075
2024-11-19T12:44:55,145 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39075 connecting to ZooKeeper ensemble=127.0.0.1:56865
2024-11-19T12:44:55,146 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T12:44:55,151 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T12:44:55,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390750x0, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-19T12:44:55,185 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39075-0x1015469e0fb0001 connected
2024-11-19T12:44:55,185 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-19T12:44:55,190 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-19T12:44:55,201 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-19T12:44:55,204 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-19T12:44:55,210 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-19T12:44:55,217 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39075
2024-11-19T12:44:55,218 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39075
2024-11-19T12:44:55,221 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39075
2024-11-19T12:44:55,222 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39075
2024-11-19T12:44:55,222 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39075
2024-11-19T12:44:55,241 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45
2024-11-19T12:44:55,241 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-19T12:44:55,241 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-19T12:44:55,242 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-19T12:44:55,242 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-19T12:44:55,242 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-19T12:44:55,243 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-19T12:44:55,243 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-19T12:44:55,244 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32985
2024-11-19T12:44:55,246 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32985 connecting to ZooKeeper ensemble=127.0.0.1:56865
2024-11-19T12:44:55,247 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T12:44:55,251 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T12:44:55,275 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329850x0, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-19T12:44:55,276 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:329850x0, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-19T12:44:55,277 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-19T12:44:55,279 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32985-0x1015469e0fb0002 connected
2024-11-19T12:44:55,282 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-19T12:44:55,284 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-19T12:44:55,286 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-19T12:44:55,291 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32985
2024-11-19T12:44:55,291 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32985
2024-11-19T12:44:55,292 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32985
2024-11-19T12:44:55,292 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32985
2024-11-19T12:44:55,293 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32985
2024-11-19T12:44:55,318 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45
2024-11-19T12:44:55,318 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-19T12:44:55,318 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-19T12:44:55,318 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-19T12:44:55,319 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-19T12:44:55,319 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-19T12:44:55,319 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-19T12:44:55,319 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-19T12:44:55,320 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39515
2024-11-19T12:44:55,323 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39515 connecting to ZooKeeper ensemble=127.0.0.1:56865
2024-11-19T12:44:55,325 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T12:44:55,328 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T12:44:55,347 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:395150x0, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-19T12:44:55,348 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:395150x0, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-19T12:44:55,349 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-19T12:44:55,349 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39515-0x1015469e0fb0003 connected
2024-11-19T12:44:55,350 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-19T12:44:55,351 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-19T12:44:55,353 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-19T12:44:55,353 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39515
2024-11-19T12:44:55,354 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39515
2024-11-19T12:44:55,354 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39515
2024-11-19T12:44:55,359 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39515
2024-11-19T12:44:55,359 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39515
2024-11-19T12:44:55,379 DEBUG [M:0;aba5a916dfea:40479 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;aba5a916dfea:40479
2024-11-19T12:44:55,380 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/aba5a916dfea,40479,1732020294118
2024-11-19T12:44:55,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-19T12:44:55,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-19T12:44:55,397 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-19T12:44:55,397
DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:44:55,400 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/aba5a916dfea,40479,1732020294118 2024-11-19T12:44:55,430 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:44:55,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:44:55,430 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:44:55,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:55,430 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:55,430 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:55,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:55,431 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T12:44:55,432 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/aba5a916dfea,40479,1732020294118 from backup master directory 2024-11-19T12:44:55,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/aba5a916dfea,40479,1732020294118 2024-11-19T12:44:55,442 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:44:55,442 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:44:55,442 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:44:55,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:44:55,442 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:44:55,443 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=aba5a916dfea,40479,1732020294118 2024-11-19T12:44:55,445 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-19T12:44:55,447 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-19T12:44:55,517 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/hbase.id] with ID: b0edeebf-8ab8-44bb-b7ae-5c5d0013f53d 2024-11-19T12:44:55,517 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/.tmp/hbase.id 2024-11-19T12:44:55,524 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:55,524 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:55,528 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_165084278_22 at /127.0.0.1:49126 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:36121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49126 dst: /127.0.0.1:36121 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:44:55,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-19T12:44:55,536 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-19T12:44:55,537 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/.tmp/hbase.id]:[hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/hbase.id] 2024-11-19T12:44:55,589 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:44:55,596 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T12:44:55,622 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 24ms. 2024-11-19T12:44:55,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:55,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:55,650 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:55,650 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:55,671 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:55,671 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:55,675 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_165084278_22 at /127.0.0.1:42834 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:44341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42834 dst: /127.0.0.1:44341 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:44:55,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-19T12:44:55,687 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
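The recurring "Cannot allocate parity block(index=3/4, policy=RS-3-2-1024k)" warnings and the DataXceiver "Premature EOF" errors above are a capacity mismatch rather than data loss on real files: an RS-3-2 block group needs 3 data + 2 parity = 5 distinct datanodes, while this mini cluster runs only three (127.0.0.1:36121, 127.0.0.1:44341, 127.0.0.1:38679), so the parity cells at indexes 3 and 4 can never be placed. The sketch below is illustrative only and not part of the test; the class name EcCapacityCheck is made up, and it assumes fs.defaultFS points at the cluster being checked. It compares the live datanode count against the five nodes an RS-3-2 layout wants, which is roughly the check 'hdfs ec -verifyClusterSetup' performs from the command line.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class EcCapacityCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS (e.g. hdfs://localhost:44033) points at the cluster under test.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    DatanodeInfo[] live = dfs.getDataNodeStats();   // datanodes currently reported by the NameNode
    int dataUnits = 3, parityUnits = 2;             // RS-3-2-1024k layout
    int required = dataUnits + parityUnits;         // 5 distinct datanodes per striped block group

    if (live.length < required) {
      System.out.printf(
          "Only %d live datanodes but RS-%d-%d needs %d: parity cells beyond index %d "
              + "cannot be allocated, matching the warnings in the log.%n",
          live.length, dataUnits, parityUnits, required, live.length - 1);
    } else {
      System.out.println("Cluster is wide enough for RS-3-2 block groups.");
    }
  }
}
```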
2024-11-19T12:44:55,705 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:44:55,708 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T12:44:55,715 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-19T12:44:55,747 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:55,748 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:55,755 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_165084278_22 at /127.0.0.1:42858 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:44341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42858 dst: /127.0.0.1:44341 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:44:55,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-19T12:44:55,765 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-19T12:44:55,789 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store 2024-11-19T12:44:55,814 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:55,814 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:55,822 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_165084278_22 at /127.0.0.1:49160 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49160 dst: /127.0.0.1:36121 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:44:55,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-19T12:44:55,830 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-19T12:44:55,835 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-19T12:44:55,839 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:44:55,840 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:44:55,840 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:44:55,841 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:44:55,843 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-19T12:44:55,843 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:44:55,843 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:44:55,844 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020295840Disabling compacts and flushes for region at 1732020295840Disabling writes for close at 1732020295843 (+3 ms)Writing region close event to WAL at 1732020295843Closed at 1732020295843 2024-11-19T12:44:55,846 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/.initializing 2024-11-19T12:44:55,846 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/WALs/aba5a916dfea,40479,1732020294118 2024-11-19T12:44:55,856 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-19T12:44:55,872 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C40479%2C1732020294118, suffix=, logDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/WALs/aba5a916dfea,40479,1732020294118, archiveDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/oldWALs, maxLogs=10 2024-11-19T12:44:55,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-19T12:44:55,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38679 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-19T12:44:55,906 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/WALs/aba5a916dfea,40479,1732020294118/aba5a916dfea%2C40479%2C1732020294118.1732020295877, exclude list is [], retry=0 2024-11-19T12:44:55,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:44:55,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44341,DS-43572478-5164-419f-a9eb-305ca567bf8f,DISK] 2024-11-19T12:44:55,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38679,DS-89d00d59-1d1c-4772-a423-c4ee7d398bc1,DISK] 2024-11-19T12:44:55,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36121,DS-7d1377b4-6a4e-4066-8877-0991cc7bfade,DISK] 2024-11-19T12:44:55,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-19T12:44:55,975 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/WALs/aba5a916dfea,40479,1732020294118/aba5a916dfea%2C40479%2C1732020294118.1732020295877 2024-11-19T12:44:55,976 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40853:40853),(127.0.0.1/127.0.0.1:42319:42319),(127.0.0.1/127.0.0.1:45439:45439)] 2024-11-19T12:44:55,976 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:44:55,977 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:44:55,980 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:44:55,981 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:44:56,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:44:56,054 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T12:44:56,059 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:56,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:44:56,062 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:44:56,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T12:44:56,066 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:56,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:44:56,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:44:56,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T12:44:56,070 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:56,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:44:56,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:44:56,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T12:44:56,074 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:56,075 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:44:56,075 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:44:56,079 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:44:56,081 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:44:56,086 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:44:56,087 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:44:56,090 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T12:44:56,093 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:44:56,102 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:44:56,104 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64114984, jitterRate=-0.04461228847503662}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:44:56,111 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732020295995Initializing all the Stores at 1732020295997 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020295997Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020295998 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020295998Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020295998Cleaning up temporary data from old regions at 1732020296087 (+89 ms)Region opened successfully at 1732020296111 (+24 ms) 2024-11-19T12:44:56,113 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T12:44:56,149 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@383b1fae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:44:56,181 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T12:44:56,192 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T12:44:56,192 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T12:44:56,195 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T12:44:56,196 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-19T12:44:56,200 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-19T12:44:56,201 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T12:44:56,231 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T12:44:56,241 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T12:44:56,291 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T12:44:56,295 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T12:44:56,296 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T12:44:56,308 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T12:44:56,311 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T12:44:56,315 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T12:44:56,325 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T12:44:56,327 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T12:44:56,338 DEBUG [master/aba5a916dfea:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T12:44:56,358 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T12:44:56,366 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T12:44:56,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:44:56,380 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:44:56,380 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:44:56,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:44:56,380 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:56,380 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:56,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:56,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:56,385 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=aba5a916dfea,40479,1732020294118, sessionid=0x1015469e0fb0000, setting cluster-up flag (Was=false) 2024-11-19T12:44:56,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:56,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:56,417 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
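The master and regionserver ZooKeeper traffic above follows one pattern: probe a znode (/hbase/running, /hbase/master, /hbase/balancer, ...) and leave a watch behind even when the node does not yet exist, so the NodeCreated and NodeChildrenChanged events in the ZKWatcher lines are delivered as soon as another process creates or changes it. Below is a minimal sketch of that pattern using the plain Apache ZooKeeper client, not HBase's ZKUtil/ZKWatcher. The quorum address 127.0.0.1:56865 and the /hbase/running path are taken from the log; the 30-second session timeout and the class name RunningZNodeWatch are illustrative assumptions.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class RunningZNodeWatch {
  public static void main(String[] args) throws Exception {
    // Print events in roughly the same shape as the ZKWatcher(609) lines above.
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());

    ZooKeeper zk = new ZooKeeper("127.0.0.1:56865", 30_000, watcher);

    // exists(path, true) registers the watch whether or not the znode is present,
    // which is what "Set watcher on znode that does not yet exist" reports.
    Stat stat = zk.exists("/hbase/running", true);
    System.out.println("/hbase/running " + (stat == null
        ? "absent; waiting for NodeCreated"
        : "present; version=" + stat.getVersion()));

    Thread.sleep(10_000);   // keep the session open long enough to observe an event
    zk.close();
  }
}
```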
2024-11-19T12:44:56,417 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:56,441 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T12:44:56,443 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,40479,1732020294118 2024-11-19T12:44:56,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:56,466 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:56,466 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:56,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:56,491 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T12:44:56,494 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,40479,1732020294118 2024-11-19T12:44:56,504 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T12:44:56,563 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.HRegionServer(746): ClusterId : b0edeebf-8ab8-44bb-b7ae-5c5d0013f53d 2024-11-19T12:44:56,563 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(746): ClusterId : b0edeebf-8ab8-44bb-b7ae-5c5d0013f53d 2024-11-19T12:44:56,565 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.HRegionServer(746): ClusterId : b0edeebf-8ab8-44bb-b7ae-5c5d0013f53d 2024-11-19T12:44:56,566 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:44:56,566 DEBUG [RS:0;aba5a916dfea:39075 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:44:56,566 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:44:56,585 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T12:44:56,588 DEBUG [RS:0;aba5a916dfea:39075 {}] 
procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:44:56,588 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:44:56,588 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:44:56,588 DEBUG [RS:0;aba5a916dfea:39075 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:44:56,588 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:44:56,588 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:44:56,595 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T12:44:56,603 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T12:44:56,609 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:44:56,609 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:44:56,610 DEBUG [RS:1;aba5a916dfea:32985 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35339278, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:44:56,610 DEBUG [RS:2;aba5a916dfea:39515 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25a39091, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:44:56,611 DEBUG [RS:0;aba5a916dfea:39075 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:44:56,611 DEBUG [RS:0;aba5a916dfea:39075 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63e3d8d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:44:56,609 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: aba5a916dfea,40479,1732020294118 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T12:44:56,618 DEBUG 
[master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:44:56,618 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:44:56,618 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:44:56,618 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:44:56,619 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/aba5a916dfea:0, corePoolSize=10, maxPoolSize=10 2024-11-19T12:44:56,619 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,619 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:44:56,619 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,625 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:44:56,626 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T12:44:56,626 DEBUG [RS:1;aba5a916dfea:32985 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;aba5a916dfea:32985 2024-11-19T12:44:56,627 DEBUG [RS:2;aba5a916dfea:39515 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;aba5a916dfea:39515 2024-11-19T12:44:56,627 DEBUG [RS:0;aba5a916dfea:39075 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;aba5a916dfea:39075 2024-11-19T12:44:56,631 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:44:56,631 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:44:56,631 DEBUG [RS:1;aba5a916dfea:32985 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-19T12:44:56,634 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,40479,1732020294118 with port=32985, startcode=1732020295240 2024-11-19T12:44:56,634 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:56,634 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T12:44:56,636 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:44:56,636 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:44:56,637 DEBUG [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-19T12:44:56,637 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732020326637 2024-11-19T12:44:56,638 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:44:56,638 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:44:56,639 DEBUG [RS:0;aba5a916dfea:39075 {}] regionserver.HRegionServer(832): About to register with Master. 
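The FSTableDescriptors(156) entry above dumps the full hbase:meta descriptor: four column families (info, ns, rep_barrier, table), each in-memory with ROWCOL bloom filters, ROW_INDEX_V1 data-block encoding and small block sizes, plus the MultiRowMutationEndpoint coprocessor. Purely as an illustration of how a descriptor with those attributes is put together through the public client API — this is not how InitMetaProcedure constructs meta internally — a sketch for a hypothetical table with an equivalent 'info' family:

```java
// Sketch only: the table name and the single family are assumptions; the builder calls are the
// standard org.apache.hadoop.hbase.client API, and the attribute values match the log output.
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptor {
  static TableDescriptor build() throws IOException {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)            // 8 KB, as printed in the log
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(info)
        // Same coprocessor class the meta descriptor carries.
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
  }
}
```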
2024-11-19T12:44:56,639 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T12:44:56,640 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,40479,1732020294118 with port=39075, startcode=1732020295087 2024-11-19T12:44:56,641 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T12:44:56,644 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,40479,1732020294118 with port=39515, startcode=1732020295317 2024-11-19T12:44:56,645 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T12:44:56,645 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T12:44:56,645 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T12:44:56,646 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T12:44:56,646 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:56,646 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:56,648 DEBUG [RS:2;aba5a916dfea:39515 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:44:56,648 DEBUG [RS:0;aba5a916dfea:39075 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:44:56,648 DEBUG [RS:1;aba5a916dfea:32985 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:44:56,647 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,654 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_165084278_22 at /127.0.0.1:49190 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:36121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49190 dst: /127.0.0.1:36121 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:44:56,663 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T12:44:56,664 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T12:44:56,665 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T12:44:56,670 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T12:44:56,677 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T12:44:56,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-19T12:44:56,684 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
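The repeated DFSStripedOutputStream(531) warnings and the DataXceiver "Premature EOF" error above indicate that these writes are attempted with the RS-3-2-1024k erasure-coding policy on a mini-cluster that cannot place the parity blocks; the log itself points at `hdfs ec -verifyClusterSetup` as the check. A hedged sketch of the programmatic equivalent, assuming the policy applies to the HBase root directory shown in the log — the decision to fall back to replication is an assumption for illustration, not something this test does:

```java
// Sketch only. The NameNode address and the path are copied from the log above; the scenario
// (directory-level EC policy on a single-DataNode cluster) is an assumption.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class CheckEcPolicy {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:44033"); // from the log
    Path hbaseRoot = new Path("/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b");
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(hbaseRoot);
      if (policy != null) {
        System.out.println("Effective EC policy on HBase root: " + policy.getName());
        // Falling back to replication is one way out when there are fewer DataNodes than
        // data+parity blocks; this only succeeds if the policy was set on this directory
        // itself (an inherited or cluster-default policy has to be changed at its source).
        dfs.unsetErasureCodingPolicy(hbaseRoot);
      } else {
        System.out.println("No EC policy in effect; the warnings would come from elsewhere.");
      }
    }
  }
}
```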
2024-11-19T12:44:56,686 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T12:44:56,686 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b 2024-11-19T12:44:56,686 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020296678,5,FailOnTimeoutGroup] 2024-11-19T12:44:56,687 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020296687,5,FailOnTimeoutGroup] 2024-11-19T12:44:56,687 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,688 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T12:44:56,689 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,690 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
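The HMaster(1741) entry above notes that the "reopen regions with very high storeFileRefCount" chore stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A minimal, hedged example of supplying that property programmatically — the value 256 is arbitrary, and in practice the setting would normally live in hbase-site.xml:

```java
// Sketch only. The property name is the one printed in the log; the threshold is an assumption.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableStoreFileRefCountRecovery {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
    System.out.println("threshold = "
        + conf.getInt("hbase.regions.recovery.store.file.ref.count", -1));
  }
}
```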
2024-11-19T12:44:56,706 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35099, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:44:56,706 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60807, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:44:56,707 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44953, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:44:56,707 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:56,707 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:56,713 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40479 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,32985,1732020295240 2024-11-19T12:44:56,715 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40479 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,32985,1732020295240 2024-11-19T12:44:56,722 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_165084278_22 at /127.0.0.1:49198 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:36121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49198 dst: /127.0.0.1:36121 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:44:56,730 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40479 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,39075,1732020295087 2024-11-19T12:44:56,731 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40479 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,39075,1732020295087 2024-11-19T12:44:56,735 DEBUG [RS:1;aba5a916dfea:32985 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b 2024-11-19T12:44:56,735 DEBUG [RS:1;aba5a916dfea:32985 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44033 2024-11-19T12:44:56,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-19T12:44:56,735 DEBUG [RS:1;aba5a916dfea:32985 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:44:56,737 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-19T12:44:56,737 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40479 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,39515,1732020295317 2024-11-19T12:44:56,738 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40479 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,39515,1732020295317 2024-11-19T12:44:56,738 DEBUG [RS:0;aba5a916dfea:39075 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b 2024-11-19T12:44:56,738 DEBUG [RS:0;aba5a916dfea:39075 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44033 2024-11-19T12:44:56,738 DEBUG [RS:0;aba5a916dfea:39075 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:44:56,738 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:44:56,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:44:56,744 DEBUG [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b 2024-11-19T12:44:56,744 DEBUG [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44033 2024-11-19T12:44:56,744 DEBUG [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:44:56,747 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; 
tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:44:56,747 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:56,748 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:44:56,749 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:44:56,754 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:44:56,754 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:56,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:44:56,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:44:56,760 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:44:56,760 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:56,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:44:56,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:44:56,765 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:44:56,765 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:56,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:44:56,767 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:44:56,768 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740 2024-11-19T12:44:56,769 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740 2024-11-19T12:44:56,773 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:44:56,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:44:56,775 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-19T12:44:56,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:44:56,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:44:56,800 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:44:56,801 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65177773, jitterRate=-0.028775498270988464}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:44:56,805 DEBUG [RS:0;aba5a916dfea:39075 {}] zookeeper.ZKUtil(111): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,39075,1732020295087 2024-11-19T12:44:56,805 DEBUG [RS:1;aba5a916dfea:32985 {}] zookeeper.ZKUtil(111): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,32985,1732020295240 2024-11-19T12:44:56,805 WARN [RS:0;aba5a916dfea:39075 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:44:56,806 WARN [RS:1;aba5a916dfea:32985 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T12:44:56,806 INFO [RS:1;aba5a916dfea:32985 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-19T12:44:56,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732020296739Initializing all the Stores at 1732020296742 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020296742Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020296742Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020296743 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020296743Cleaning up temporary data from old regions at 1732020296774 (+31 ms)Region opened successfully at 1732020296806 (+32 ms) 2024-11-19T12:44:56,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:44:56,806 DEBUG [RS:1;aba5a916dfea:32985 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,32985,1732020295240 2024-11-19T12:44:56,806 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:44:56,806 INFO [RS:0;aba5a916dfea:39075 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-19T12:44:56,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:44:56,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:44:56,806 DEBUG [RS:0;aba5a916dfea:39075 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,39075,1732020295087 2024-11-19T12:44:56,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:44:56,807 DEBUG [RS:2;aba5a916dfea:39515 {}] zookeeper.ZKUtil(111): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,39515,1732020295317 2024-11-19T12:44:56,807 WARN 
[RS:2;aba5a916dfea:39515 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:44:56,807 INFO [RS:2;aba5a916dfea:39515 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-19T12:44:56,807 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,39515,1732020295317] 2024-11-19T12:44:56,807 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,32985,1732020295240] 2024-11-19T12:44:56,807 DEBUG [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,39515,1732020295317 2024-11-19T12:44:56,808 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,39075,1732020295087] 2024-11-19T12:44:56,810 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:44:56,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020296806Disabling compacts and flushes for region at 1732020296806Disabling writes for close at 1732020296806Writing region close event to WAL at 1732020296810 (+4 ms)Closed at 1732020296810 2024-11-19T12:44:56,824 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:44:56,824 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T12:44:56,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T12:44:56,839 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:44:56,839 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:44:56,839 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:44:56,843 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:44:56,850 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T12:44:56,869 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:44:56,869 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:44:56,870 INFO [RS:1;aba5a916dfea:32985 {}] 
regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:44:56,879 INFO [RS:0;aba5a916dfea:39075 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:44:56,879 INFO [RS:2;aba5a916dfea:39515 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:44:56,879 INFO [RS:1;aba5a916dfea:32985 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:44:56,880 INFO [RS:0;aba5a916dfea:39075 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,880 INFO [RS:1;aba5a916dfea:32985 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,880 INFO [RS:2;aba5a916dfea:39515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,883 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:44:56,883 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:44:56,883 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:44:56,891 INFO [RS:2;aba5a916dfea:39515 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:44:56,891 INFO [RS:1;aba5a916dfea:32985 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:44:56,891 INFO [RS:0;aba5a916dfea:39075 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:44:56,893 INFO [RS:0;aba5a916dfea:39075 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-19T12:44:56,893 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,893 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,893 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,893 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,893 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,894 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:44:56,894 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,894 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,894 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,894 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,894 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,894 INFO [RS:1;aba5a916dfea:32985 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,894 INFO [RS:2;aba5a916dfea:39515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-19T12:44:56,895 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,895 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,895 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:44:56,895 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,895 DEBUG [RS:0;aba5a916dfea:39075 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:44:56,895 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,895 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,895 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,895 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,895 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,895 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,895 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,895 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:44:56,895 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,895 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,895 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:44:56,896 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,896 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, 
corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,896 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,896 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,896 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,896 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,896 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,896 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,896 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,896 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:44:56,896 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,896 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:44:56,896 DEBUG [RS:1;aba5a916dfea:32985 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:44:56,896 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:44:56,897 DEBUG [RS:2;aba5a916dfea:39515 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:44:56,905 INFO [RS:0;aba5a916dfea:39075 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,906 INFO [RS:0;aba5a916dfea:39075 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,906 INFO [RS:1;aba5a916dfea:32985 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,906 INFO [RS:0;aba5a916dfea:39075 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,906 INFO [RS:1;aba5a916dfea:32985 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-11-19T12:44:56,906 INFO [RS:0;aba5a916dfea:39075 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,906 INFO [RS:1;aba5a916dfea:32985 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,906 INFO [RS:0;aba5a916dfea:39075 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,906 INFO [RS:0;aba5a916dfea:39075 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39075,1732020295087-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:44:56,906 INFO [RS:1;aba5a916dfea:32985 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,907 INFO [RS:1;aba5a916dfea:32985 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,907 INFO [RS:1;aba5a916dfea:32985 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,32985,1732020295240-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:44:56,910 INFO [RS:2;aba5a916dfea:39515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,911 INFO [RS:2;aba5a916dfea:39515 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,911 INFO [RS:2;aba5a916dfea:39515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,911 INFO [RS:2;aba5a916dfea:39515 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,911 INFO [RS:2;aba5a916dfea:39515 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,911 INFO [RS:2;aba5a916dfea:39515 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39515,1732020295317-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:44:56,934 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:44:56,937 INFO [RS:0;aba5a916dfea:39075 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39075,1732020295087-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,937 INFO [RS:0;aba5a916dfea:39075 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,937 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.Replication(171): aba5a916dfea,39075,1732020295087 started 2024-11-19T12:44:56,937 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:44:56,938 INFO [RS:2;aba5a916dfea:39515 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39515,1732020295317-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,938 INFO [RS:2;aba5a916dfea:39515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T12:44:56,938 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.Replication(171): aba5a916dfea,39515,1732020295317 started 2024-11-19T12:44:56,939 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:44:56,939 INFO [RS:1;aba5a916dfea:32985 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,32985,1732020295240-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,939 INFO [RS:1;aba5a916dfea:32985 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,939 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.Replication(171): aba5a916dfea,32985,1732020295240 started 2024-11-19T12:44:56,961 INFO [RS:2;aba5a916dfea:39515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,961 INFO [RS:0;aba5a916dfea:39075 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,961 INFO [RS:1;aba5a916dfea:32985 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:56,962 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,32985,1732020295240, RpcServer on aba5a916dfea/172.17.0.2:32985, sessionid=0x1015469e0fb0002 2024-11-19T12:44:56,962 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,39515,1732020295317, RpcServer on aba5a916dfea/172.17.0.2:39515, sessionid=0x1015469e0fb0003 2024-11-19T12:44:56,962 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,39075,1732020295087, RpcServer on aba5a916dfea/172.17.0.2:39075, sessionid=0x1015469e0fb0001 2024-11-19T12:44:56,963 DEBUG [RS:0;aba5a916dfea:39075 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:44:56,963 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:44:56,971 DEBUG [RS:1;aba5a916dfea:32985 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,32985,1732020295240 2024-11-19T12:44:56,971 DEBUG [RS:0;aba5a916dfea:39075 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,39075,1732020295087 2024-11-19T12:44:56,963 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:44:56,971 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,32985,1732020295240' 2024-11-19T12:44:56,971 DEBUG [RS:0;aba5a916dfea:39075 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,39075,1732020295087' 2024-11-19T12:44:56,971 DEBUG [RS:2;aba5a916dfea:39515 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,39515,1732020295317 2024-11-19T12:44:56,971 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,39515,1732020295317' 2024-11-19T12:44:56,972 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: 
'/hbase/flush-table-proc/abort' 2024-11-19T12:44:56,972 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:44:56,972 DEBUG [RS:0;aba5a916dfea:39075 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:44:56,973 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:44:56,973 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:44:56,974 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:44:56,974 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:44:56,973 DEBUG [RS:0;aba5a916dfea:39075 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:44:56,974 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:44:56,974 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:44:56,974 DEBUG [RS:1;aba5a916dfea:32985 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager aba5a916dfea,32985,1732020295240 2024-11-19T12:44:56,974 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,32985,1732020295240' 2024-11-19T12:44:56,974 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:44:56,975 DEBUG [RS:2;aba5a916dfea:39515 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager aba5a916dfea,39515,1732020295317 2024-11-19T12:44:56,975 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,39515,1732020295317' 2024-11-19T12:44:56,975 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:44:56,975 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:44:56,975 DEBUG [RS:0;aba5a916dfea:39075 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:44:56,976 DEBUG [RS:0;aba5a916dfea:39075 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:44:56,976 DEBUG [RS:0;aba5a916dfea:39075 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager aba5a916dfea,39075,1732020295087 2024-11-19T12:44:56,976 DEBUG [RS:0;aba5a916dfea:39075 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,39075,1732020295087' 2024-11-19T12:44:56,976 DEBUG [RS:0;aba5a916dfea:39075 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:44:56,976 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 
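The ZKProcedureMemberRpcs entries show each region server joining the flush-table-proc and online-snapshot procedure pools: it first checks the shared abort znode, then watches the acquired znode for new procedures. A minimal sketch of that check-then-watch pattern with the plain ZooKeeper client follows; the znode paths are taken from the log, while the quorum address and the watcher body are placeholders, not HBase's implementation:

```java
import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureMemberSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder quorum address; the test run above uses its own ephemeral port.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, (WatchedEvent e) -> { });

        // "Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'"
        List<String> aborted = zk.getChildren("/hbase/flush-table-proc/abort", false);

        // "Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'"
        // Passing a Watcher registers a one-shot watch so new children trigger a callback.
        Watcher onNewProcedure = (WatchedEvent e) -> System.out.println("procedure event: " + e);
        List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", onNewProcedure);

        System.out.println("aborted=" + aborted + " acquired=" + acquired);
        zk.close();
    }
}
```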
2024-11-19T12:44:56,979 DEBUG [RS:1;aba5a916dfea:32985 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:44:56,979 DEBUG [RS:0;aba5a916dfea:39075 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:44:56,979 INFO [RS:1;aba5a916dfea:32985 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:44:56,979 INFO [RS:1;aba5a916dfea:32985 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T12:44:56,980 DEBUG [RS:2;aba5a916dfea:39515 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:44:56,980 INFO [RS:2;aba5a916dfea:39515 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:44:56,980 INFO [RS:2;aba5a916dfea:39515 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T12:44:56,980 DEBUG [RS:0;aba5a916dfea:39075 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:44:56,980 INFO [RS:0;aba5a916dfea:39075 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:44:56,980 INFO [RS:0;aba5a916dfea:39075 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T12:44:57,002 WARN [aba5a916dfea:40479 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T12:44:57,084 INFO [RS:2;aba5a916dfea:39515 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-19T12:44:57,084 INFO [RS:0;aba5a916dfea:39075 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-19T12:44:57,084 INFO [RS:1;aba5a916dfea:32985 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-19T12:44:57,087 INFO [RS:0;aba5a916dfea:39075 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C39075%2C1732020295087, suffix=, logDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,39075,1732020295087, archiveDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/oldWALs, maxLogs=32 2024-11-19T12:44:57,087 INFO [RS:2;aba5a916dfea:39515 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C39515%2C1732020295317, suffix=, logDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,39515,1732020295317, archiveDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/oldWALs, maxLogs=32 2024-11-19T12:44:57,088 INFO [RS:1;aba5a916dfea:32985 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C32985%2C1732020295240, suffix=, logDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,32985,1732020295240, archiveDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/oldWALs, maxLogs=32 2024-11-19T12:44:57,108 DEBUG [RS:0;aba5a916dfea:39075 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,39075,1732020295087/aba5a916dfea%2C39075%2C1732020295087.1732020297091, exclude list is [], retry=0 2024-11-19T12:44:57,109 DEBUG [RS:1;aba5a916dfea:32985 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,32985,1732020295240/aba5a916dfea%2C32985%2C1732020295240.1732020297091, exclude list is [], retry=0 2024-11-19T12:44:57,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38679,DS-89d00d59-1d1c-4772-a423-c4ee7d398bc1,DISK] 2024-11-19T12:44:57,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36121,DS-7d1377b4-6a4e-4066-8877-0991cc7bfade,DISK] 2024-11-19T12:44:57,120 DEBUG [RS:2;aba5a916dfea:39515 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,39515,1732020295317/aba5a916dfea%2C39515%2C1732020295317.1732020297091, exclude list is [], retry=0 2024-11-19T12:44:57,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38679,DS-89d00d59-1d1c-4772-a423-c4ee7d398bc1,DISK] 2024-11-19T12:44:57,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36121,DS-7d1377b4-6a4e-4066-8877-0991cc7bfade,DISK] 2024-11-19T12:44:57,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44341,DS-43572478-5164-419f-a9eb-305ca567bf8f,DISK] 2024-11-19T12:44:57,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44341,DS-43572478-5164-419f-a9eb-305ca567bf8f,DISK] 2024-11-19T12:44:57,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36121,DS-7d1377b4-6a4e-4066-8877-0991cc7bfade,DISK] 2024-11-19T12:44:57,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38679,DS-89d00d59-1d1c-4772-a423-c4ee7d398bc1,DISK] 2024-11-19T12:44:57,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL 
client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44341,DS-43572478-5164-419f-a9eb-305ca567bf8f,DISK] 2024-11-19T12:44:57,161 INFO [RS:0;aba5a916dfea:39075 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,39075,1732020295087/aba5a916dfea%2C39075%2C1732020295087.1732020297091 2024-11-19T12:44:57,161 INFO [RS:1;aba5a916dfea:32985 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,32985,1732020295240/aba5a916dfea%2C32985%2C1732020295240.1732020297091 2024-11-19T12:44:57,163 DEBUG [RS:1;aba5a916dfea:32985 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42319:42319),(127.0.0.1/127.0.0.1:40853:40853),(127.0.0.1/127.0.0.1:45439:45439)] 2024-11-19T12:44:57,163 DEBUG [RS:0;aba5a916dfea:39075 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42319:42319),(127.0.0.1/127.0.0.1:40853:40853),(127.0.0.1/127.0.0.1:45439:45439)] 2024-11-19T12:44:57,164 INFO [RS:2;aba5a916dfea:39515 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,39515,1732020295317/aba5a916dfea%2C39515%2C1732020295317.1732020297091 2024-11-19T12:44:57,169 DEBUG [RS:2;aba5a916dfea:39515 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40853:40853),(127.0.0.1/127.0.0.1:42319:42319),(127.0.0.1/127.0.0.1:45439:45439)] 2024-11-19T12:44:57,255 DEBUG [aba5a916dfea:40479 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-19T12:44:57,264 DEBUG [aba5a916dfea:40479 {}] balancer.BalancerClusterState(204): Hosts are {aba5a916dfea=0} racks are {/default-rack=0} 2024-11-19T12:44:57,271 DEBUG [aba5a916dfea:40479 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-19T12:44:57,271 DEBUG [aba5a916dfea:40479 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-19T12:44:57,271 DEBUG [aba5a916dfea:40479 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-19T12:44:57,271 DEBUG [aba5a916dfea:40479 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-19T12:44:57,271 DEBUG [aba5a916dfea:40479 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-19T12:44:57,271 DEBUG [aba5a916dfea:40479 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-19T12:44:57,271 INFO [aba5a916dfea:40479 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-19T12:44:57,271 INFO [aba5a916dfea:40479 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-19T12:44:57,271 INFO [aba5a916dfea:40479 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-19T12:44:57,271 DEBUG [aba5a916dfea:40479 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-19T12:44:57,280 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=aba5a916dfea,39515,1732020295317 2024-11-19T12:44:57,289 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,39515,1732020295317, state=OPENING 2024-11-19T12:44:57,341 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create 
it 2024-11-19T12:44:57,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:57,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:57,349 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:57,349 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:57,350 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:44:57,350 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:44:57,350 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:44:57,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:44:57,353 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:44:57,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,39515,1732020295317}] 2024-11-19T12:44:57,532 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T12:44:57,534 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54691, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T12:44:57,548 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T12:44:57,548 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-19T12:44:57,549 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-19T12:44:57,552 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C39515%2C1732020295317.meta, suffix=.meta, logDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,39515,1732020295317, 
archiveDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/oldWALs, maxLogs=32 2024-11-19T12:44:57,569 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,39515,1732020295317/aba5a916dfea%2C39515%2C1732020295317.meta.1732020297554.meta, exclude list is [], retry=0 2024-11-19T12:44:57,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38679,DS-89d00d59-1d1c-4772-a423-c4ee7d398bc1,DISK] 2024-11-19T12:44:57,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36121,DS-7d1377b4-6a4e-4066-8877-0991cc7bfade,DISK] 2024-11-19T12:44:57,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44341,DS-43572478-5164-419f-a9eb-305ca567bf8f,DISK] 2024-11-19T12:44:57,582 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/WALs/aba5a916dfea,39515,1732020295317/aba5a916dfea%2C39515%2C1732020295317.meta.1732020297554.meta 2024-11-19T12:44:57,587 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40853:40853),(127.0.0.1/127.0.0.1:42319:42319),(127.0.0.1/127.0.0.1:45439:45439)] 2024-11-19T12:44:57,587 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:44:57,589 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T12:44:57,593 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T12:44:57,599 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
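The WAL configuration lines above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, plus a separate `.meta`-suffixed WAL for the meta region) size the async FS WAL so that a log rolls at half the configured WAL block size. The arithmetic below just makes that relationship explicit; the 0.5 roll multiplier is an assumption that reproduces the two logged numbers, not a value read from the log:

```java
public class WalRollSizeSketch {
    public static void main(String[] args) {
        long blockSizeBytes = 256L * 1024 * 1024; // "blocksize=256 MB" from the WAL configuration line
        double rollMultiplier = 0.5;              // assumed multiplier that yields the logged rollsize
        long rollSizeBytes = (long) (blockSizeBytes * rollMultiplier);
        System.out.println("roll at " + (rollSizeBytes >> 20) + " MB"); // 128 MB, matching "rollsize=128 MB"

        // "maxLogs=32": with 32 retained WAL files of up to 128 MB each, one region server's
        // WAL directory is bounded by roughly 4 GB before flushes are forced to free old logs.
        System.out.println("approx WAL cap per server: " + ((rollSizeBytes * 32) >> 30) + " GB");
    }
}
```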
2024-11-19T12:44:57,604 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T12:44:57,605 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:44:57,605 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T12:44:57,605 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T12:44:57,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:44:57,611 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:44:57,611 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:57,613 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:44:57,613 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:44:57,615 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:44:57,615 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:57,616 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:44:57,617 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:44:57,619 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:44:57,619 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:57,620 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:44:57,620 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:44:57,621 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:44:57,622 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:57,622 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
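The CompactionConfiguration line repeated for each column family of hbase:meta (info, ns, rep_barrier, table) carries the same knobs: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0. As a simplified sketch of how that ratio is commonly applied in HBase's size-ratio selection (the real ExploringCompactionPolicy evaluates many candidate file sets rather than a single window), a store file qualifies when it is not much larger than the files that would be compacted alongside it:

```java
public class CompactionRatioSketch {
    // Simplified size-ratio test: fileSize <= ratio * (sum of the other candidate files).
    static boolean eligible(long fileSize, long sumOfOthers, double ratio) {
        return fileSize <= ratio * sumOfOthers;
    }

    public static void main(String[] args) {
        double ratio = 1.2; // "ratio 1.200000" from the log; off-peak hours would use 5.0
        long[] otherFilesMb = {10, 12, 15};
        long sum = 0;
        for (long f : otherFilesMb) sum += f;        // 37 MB of smaller neighbours
        System.out.println(eligible(40, sum, ratio)); // true:  40 <= 1.2 * 37 = 44.4
        System.out.println(eligible(60, sum, ratio)); // false: 60 >  44.4, too large to include
    }
}
```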
2024-11-19T12:44:57,623 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:44:57,625 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740 2024-11-19T12:44:57,629 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740 2024-11-19T12:44:57,633 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:44:57,633 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:44:57,639 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T12:44:57,643 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:44:57,645 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75066311, jitterRate=0.11857520043849945}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:44:57,645 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T12:44:57,647 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732020297606Writing region info on filesystem at 1732020297606Initializing all the Stores at 1732020297608 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020297608Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020297609 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020297609Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020297609Cleaning up temporary data from old regions at 1732020297633 (+24 ms)Running coprocessor post-open hooks at 1732020297645 (+12 ms)Region opened successfully at 1732020297647 (+2 ms) 2024-11-19T12:44:57,655 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732020297524 2024-11-19T12:44:57,669 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T12:44:57,669 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T12:44:57,674 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,39515,1732020295317 2024-11-19T12:44:57,676 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,39515,1732020295317, state=OPEN 2024-11-19T12:44:57,716 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:44:57,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:44:57,716 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:44:57,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:44:57,716 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:44:57,716 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:44:57,716 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:44:57,716 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:44:57,717 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=aba5a916dfea,39515,1732020295317 2024-11-19T12:44:57,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T12:44:57,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,39515,1732020295317 in 362 msec 2024-11-19T12:44:57,732 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T12:44:57,732 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 895 msec 2024-11-19T12:44:57,734 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:44:57,734 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T12:44:57,753 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:44:57,755 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,39515,1732020295317, seqNum=-1] 2024-11-19T12:44:57,795 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:44:57,799 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34195, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:44:57,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2940 sec 2024-11-19T12:44:57,832 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732020297831, completionTime=-1 2024-11-19T12:44:57,838 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-19T12:44:57,838 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
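InitMetaProcedure's "Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces" step creates the two built-in namespaces internally on the master. For comparison, a user-defined namespace goes through the client Admin API; the sketch below is illustrative, the namespace name is hypothetical, and the Admin instance is assumed to come from a client connection (see the connection sketch further below):

```java
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;

public class NamespaceSketch {
    // 'admin' is assumed to be obtained from an open client Connection.
    static void createTeamNamespace(Admin admin) throws java.io.IOException {
        // 'default' and 'hbase' in the log are created by InitMetaProcedure itself;
        // additional namespaces use the same descriptor type via the Admin API.
        NamespaceDescriptor ns = NamespaceDescriptor.create("team_x").build();
        admin.createNamespace(ns);
    }
}
```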
2024-11-19T12:44:57,863 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-19T12:44:57,864 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732020357863 2024-11-19T12:44:57,864 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732020417864 2024-11-19T12:44:57,864 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 25 msec 2024-11-19T12:44:57,865 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-19T12:44:57,872 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40479,1732020294118-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:57,873 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40479,1732020294118-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:57,873 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40479,1732020294118-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:57,875 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-aba5a916dfea:40479, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:57,875 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:57,878 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:57,884 DEBUG [master/aba5a916dfea:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T12:44:57,910 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.467sec 2024-11-19T12:44:57,918 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T12:44:57,920 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T12:44:57,921 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T12:44:57,922 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-19T12:44:57,922 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T12:44:57,923 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40479,1732020294118-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:44:57,923 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40479,1732020294118-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T12:44:57,929 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T12:44:57,930 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T12:44:57,931 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40479,1732020294118-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:44:57,975 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40c90b66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:44:57,981 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-19T12:44:57,981 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-19T12:44:57,984 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request aba5a916dfea,40479,-1 for getting cluster id 2024-11-19T12:44:57,987 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T12:44:57,998 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b0edeebf-8ab8-44bb-b7ae-5c5d0013f53d' 2024-11-19T12:44:58,001 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T12:44:58,001 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b0edeebf-8ab8-44bb-b7ae-5c5d0013f53d" 2024-11-19T12:44:58,001 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26fbaf8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:44:58,001 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [aba5a916dfea,40479,-1] 2024-11-19T12:44:58,004 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T12:44:58,005 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:44:58,007 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55674, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
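The ClusterIdFetcher / connection-registry entries above show the test client bootstrapping a connection against the active master before issuing the `create 'TestHBaseWalOnEC'` request that follows. A minimal client-side sketch of the same sequence with the standard HBase 2.x+ API; the quorum value is a placeholder, since the test wires its mini-cluster Configuration in directly:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // placeholder; the test uses its mini-cluster quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Mirrors the logged request: one table, one column family 'cf', REGION_REPLICATION => '1'.
            TableDescriptor desc = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                .setRegionReplication(1)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                .build();
            admin.createTable(desc); // the master then stores a CreateTableProcedure, as in the log (pid=4)
        }
    }
}
```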
2024-11-19T12:44:58,010 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4af74a56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:44:58,010 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:44:58,018 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,39515,1732020295317, seqNum=-1] 2024-11-19T12:44:58,018 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:44:58,021 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59664, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:44:58,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=aba5a916dfea,40479,1732020294118 2024-11-19T12:44:58,047 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T12:44:58,053 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is aba5a916dfea,40479,1732020294118 2024-11-19T12:44:58,056 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1d634607 2024-11-19T12:44:58,058 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:44:58,060 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55686, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:44:58,068 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:44:58,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-19T12:44:58,081 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:44:58,084 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-19T12:44:58,084 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:58,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:44:58,092 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:44:58,109 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:58,109 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:58,116 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_165084278_22 at /127.0.0.1:43024 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:38679:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43024 dst: /127.0.0.1:38679 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:44:58,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38679 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-19T12:44:58,126 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
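The two "Cannot allocate parity block" warnings and the "Block group <1> failed to write 2 blocks" message trace back to one count: the RS-3-2-1024k erasure coding policy stripes each block group across 3 data blocks plus 2 parity blocks, but this mini-cluster only runs 3 datanodes, so the two parity blocks have nowhere to land. A few lines of arithmetic make the logged numbers explicit (the datanode count is inferred from the three distinct 127.0.0.1 storage entries seen earlier in the log):

```java
public class EcPlacementSketch {
    public static void main(String[] args) {
        // RS-3-2-1024k: Reed-Solomon, 3 data units + 2 parity units, 1024k striping cell.
        int dataUnits = 3;
        int parityUnits = 2;
        int requiredDatanodes = dataUnits + parityUnits; // 5 distinct datanodes per block group

        int availableDatanodes = 3; // the mini-cluster started three DNs (ports 38679, 36121, 44341)
        int unplacedParityBlocks = Math.max(0, requiredDatanodes - availableDatanodes);

        // Prints 2 -> matches "Cannot allocate parity block(index=3/4)" and
        // "Block group <1> failed to write 2 blocks".
        System.out.println("parity blocks without a datanode: " + unplacedParityBlocks);
    }
}
```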
2024-11-19T12:44:58,129 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d4cdd60b778d8af049d8064f056824a3, NAME => 'TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b 2024-11-19T12:44:58,138 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:58,138 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:58,141 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_165084278_22 at /127.0.0.1:43044 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:38679:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43044 dst: /127.0.0.1:38679 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:44:58,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38679 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-19T12:44:58,149 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
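The same pair of parity warnings repeats here for the write issued while initializing the new TestHBaseWalOnEC region, so every file created under this EC-enabled test directory reports it. The log itself points at `hdfs ec -verifyClusterSetup`; programmatically, one way to inspect which policy applies to the test-data directory is sketched below, assuming the Hadoop 3 DistributedFileSystem erasure-coding API. The NameNode URI and path are copied from the log, but the code is illustrative and not part of the test:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheckSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address as logged (hdfs://localhost:44033); illustrative only.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44033"), conf);
        if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            Path testData = new Path("/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b");
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(testData);
            // Expect something like RS-3-2-1024k if the directory is EC-enabled.
            System.out.println(policy == null ? "replicated (no EC policy)" : policy.getName());
        }
        fs.close();
    }
}
```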
2024-11-19T12:44:58,150 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:44:58,150 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing d4cdd60b778d8af049d8064f056824a3, disabling compactions & flushes 2024-11-19T12:44:58,150 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 2024-11-19T12:44:58,150 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 2024-11-19T12:44:58,150 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. after waiting 0 ms 2024-11-19T12:44:58,150 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 2024-11-19T12:44:58,150 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 2024-11-19T12:44:58,150 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for d4cdd60b778d8af049d8064f056824a3: Waiting for close lock at 1732020298150Disabling compacts and flushes for region at 1732020298150Disabling writes for close at 1732020298150Writing region close event to WAL at 1732020298150Closed at 1732020298150 2024-11-19T12:44:58,153 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:44:58,158 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1732020298153"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732020298153"}]},"ts":"1732020298153"} 2024-11-19T12:44:58,163 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-19T12:44:58,165 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:44:58,168 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020298165"}]},"ts":"1732020298165"} 2024-11-19T12:44:58,172 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-19T12:44:58,173 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {aba5a916dfea=0} racks are {/default-rack=0} 2024-11-19T12:44:58,174 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-19T12:44:58,174 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-19T12:44:58,174 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-19T12:44:58,174 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-19T12:44:58,174 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-19T12:44:58,175 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-19T12:44:58,175 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-19T12:44:58,175 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-19T12:44:58,175 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-19T12:44:58,175 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-19T12:44:58,177 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d4cdd60b778d8af049d8064f056824a3, ASSIGN}] 2024-11-19T12:44:58,180 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d4cdd60b778d8af049d8064f056824a3, ASSIGN 2024-11-19T12:44:58,182 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d4cdd60b778d8af049d8064f056824a3, ASSIGN; state=OFFLINE, location=aba5a916dfea,39515,1732020295317; forceNewPlan=false, retain=false 2024-11-19T12:44:58,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:44:58,334 INFO [aba5a916dfea:40479 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
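pid=5 above is the assignment of the new region. On the test side this is normally waited on rather than driven directly, as the later "Waiting until all regions of table TestHBaseWalOnEC get assigned" lines show. A sketch of that wait, assuming UTIL is the HBaseTestingUtil instance that started this minicluster (the field name is an assumption):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    final class AssignmentWaitSketch {
      // Blocks until every region of the table is assigned (the log reports a 60000 ms timeout).
      static void awaitAssignment(HBaseTestingUtil UTIL) throws Exception {
        UTIL.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"));
      }
    }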
2024-11-19T12:44:58,335 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d4cdd60b778d8af049d8064f056824a3, regionState=OPENING, regionLocation=aba5a916dfea,39515,1732020295317 2024-11-19T12:44:58,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d4cdd60b778d8af049d8064f056824a3, ASSIGN because future has completed 2024-11-19T12:44:58,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d4cdd60b778d8af049d8064f056824a3, server=aba5a916dfea,39515,1732020295317}] 2024-11-19T12:44:58,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:44:58,503 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 2024-11-19T12:44:58,503 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d4cdd60b778d8af049d8064f056824a3, NAME => 'TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:44:58,504 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC d4cdd60b778d8af049d8064f056824a3 2024-11-19T12:44:58,504 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:44:58,504 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d4cdd60b778d8af049d8064f056824a3 2024-11-19T12:44:58,504 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d4cdd60b778d8af049d8064f056824a3 2024-11-19T12:44:58,507 INFO [StoreOpener-d4cdd60b778d8af049d8064f056824a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d4cdd60b778d8af049d8064f056824a3 2024-11-19T12:44:58,509 INFO [StoreOpener-d4cdd60b778d8af049d8064f056824a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d4cdd60b778d8af049d8064f056824a3 columnFamilyName cf 2024-11-19T12:44:58,509 DEBUG [StoreOpener-d4cdd60b778d8af049d8064f056824a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:44:58,510 INFO [StoreOpener-d4cdd60b778d8af049d8064f056824a3-1 {}] regionserver.HStore(327): Store=d4cdd60b778d8af049d8064f056824a3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:44:58,510 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d4cdd60b778d8af049d8064f056824a3 2024-11-19T12:44:58,511 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/default/TestHBaseWalOnEC/d4cdd60b778d8af049d8064f056824a3 2024-11-19T12:44:58,512 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/default/TestHBaseWalOnEC/d4cdd60b778d8af049d8064f056824a3 2024-11-19T12:44:58,513 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d4cdd60b778d8af049d8064f056824a3 2024-11-19T12:44:58,513 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d4cdd60b778d8af049d8064f056824a3 2024-11-19T12:44:58,518 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d4cdd60b778d8af049d8064f056824a3 2024-11-19T12:44:58,523 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/default/TestHBaseWalOnEC/d4cdd60b778d8af049d8064f056824a3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:44:58,524 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d4cdd60b778d8af049d8064f056824a3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67744880, jitterRate=0.009477376937866211}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T12:44:58,524 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d4cdd60b778d8af049d8064f056824a3 2024-11-19T12:44:58,525 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d4cdd60b778d8af049d8064f056824a3: Running coprocessor pre-open hook at 1732020298504Writing region info on filesystem at 1732020298504Initializing all the Stores at 1732020298506 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020298506Cleaning up temporary data from old regions at 1732020298513 (+7 ms)Running coprocessor post-open hooks at 1732020298524 (+11 ms)Region opened successfully at 1732020298525 (+1 ms) 2024-11-19T12:44:58,528 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3., pid=6, masterSystemTime=1732020298497 2024-11-19T12:44:58,531 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 2024-11-19T12:44:58,531 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 2024-11-19T12:44:58,532 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d4cdd60b778d8af049d8064f056824a3, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,39515,1732020295317 2024-11-19T12:44:58,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d4cdd60b778d8af049d8064f056824a3, server=aba5a916dfea,39515,1732020295317 because future has completed 2024-11-19T12:44:58,545 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T12:44:58,546 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d4cdd60b778d8af049d8064f056824a3, server=aba5a916dfea,39515,1732020295317 in 198 msec 2024-11-19T12:44:58,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T12:44:58,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d4cdd60b778d8af049d8064f056824a3, ASSIGN in 369 msec 2024-11-19T12:44:58,551 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:44:58,551 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020298551"}]},"ts":"1732020298551"} 2024-11-19T12:44:58,554 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-19T12:44:58,556 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:44:58,560 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 485 msec 
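CreateTableProcedure pid=4 finishing above corresponds to a one-family table ('cf', single version, no compression or encoding, per the descriptor printed at 12:44:58,129). A hedged sketch of the equivalent Admin call follows; the Connection handle is assumed to come from the test's cluster connection.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CreateTestTableSketch {
      // Creates a table shaped like the one in this log: one family 'cf', max one version.
      static void create(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                      .setMaxVersions(1)
                      .build())
                  .build());
        }
      }
    }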
2024-11-19T12:44:58,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:44:58,725 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-19T12:44:58,725 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-19T12:44:58,726 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T12:44:58,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-19T12:44:58,733 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T12:44:58,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-19T12:44:58,744 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3., hostname=aba5a916dfea,39515,1732020295317, seqNum=2] 2024-11-19T12:44:58,756 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-19T12:44:58,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-19T12:44:58,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-19T12:44:58,766 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:44:58,769 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:44:58,771 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:44:58,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-19T12:44:58,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38679 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-19T12:44:58,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38679 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-19T12:44:58,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-19T12:44:58,850 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-19T12:44:58,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38679 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-19T12:44:58,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-19T12:44:58,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38679 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-19T12:44:58,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-19T12:44:58,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39515 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-19T12:44:58,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 2024-11-19T12:44:58,936 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing d4cdd60b778d8af049d8064f056824a3 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-19T12:44:59,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/default/TestHBaseWalOnEC/d4cdd60b778d8af049d8064f056824a3/.tmp/cf/d03e8bc947104c2ca2aadcb54b22da54 is 36, key is row/cf:cq/1732020298747/Put/seqid=0 2024-11-19T12:44:59,021 WARN [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:59,021 WARN [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:59,028 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_332967738_22 at /127.0.0.1:42966 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:44341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42966 dst: /127.0.0.1:44341 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:44:59,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-19T12:44:59,049 WARN [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-19T12:44:59,049 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/default/TestHBaseWalOnEC/d4cdd60b778d8af049d8064f056824a3/.tmp/cf/d03e8bc947104c2ca2aadcb54b22da54 2024-11-19T12:44:59,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-19T12:44:59,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/default/TestHBaseWalOnEC/d4cdd60b778d8af049d8064f056824a3/.tmp/cf/d03e8bc947104c2ca2aadcb54b22da54 as hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/default/TestHBaseWalOnEC/d4cdd60b778d8af049d8064f056824a3/cf/d03e8bc947104c2ca2aadcb54b22da54 2024-11-19T12:44:59,113 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/default/TestHBaseWalOnEC/d4cdd60b778d8af049d8064f056824a3/cf/d03e8bc947104c2ca2aadcb54b22da54, entries=1, sequenceid=5, filesize=4.7 K 2024-11-19T12:44:59,122 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for d4cdd60b778d8af049d8064f056824a3 in 183ms, sequenceid=5, compaction requested=false 2024-11-19T12:44:59,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-19T12:44:59,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for d4cdd60b778d8af049d8064f056824a3: 
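The flush above covers a single 32 B cell, row/cf:cq, written at 12:44:58,747 and persisted as store file d03e8bc947104c2ca2aadcb54b22da54. A sketch of the client calls that produce this sequence; the cell value is not shown in the log and is a placeholder, and the Connection is assumed:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class PutAndFlushSketch {
      // One cell into the memstore, then an explicit flush so it is written out as an HFile.
      static void run(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(tn); Admin admin = conn.getAdmin()) {
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          admin.flush(tn); // drives the FlushTableProcedure logged as pid=7
        }
      }
    }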
2024-11-19T12:44:59,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 2024-11-19T12:44:59,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-19T12:44:59,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-19T12:44:59,142 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-19T12:44:59,143 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 364 msec 2024-11-19T12:44:59,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 387 msec 2024-11-19T12:44:59,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40479 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-19T12:44:59,395 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-19T12:44:59,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T12:44:59,407 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T12:44:59,407 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:44:59,412 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:44:59,413 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:44:59,413 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
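The call stack above is the regular teardown path: TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) shutting the minicluster down through HBaseTestingUtil. A minimal sketch of that lifecycle under JUnit 4, assuming a static utility field named UTIL, a startMiniCluster(int) overload, and a three-server cluster like the one in this log; the real test's field and hook names may differ:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycleSketch {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        UTIL.startMiniCluster(3); // three region servers/datanodes, matching RS:0..RS:2 below
      }

      @AfterClass
      public static void tearDown() throws Exception {
        UTIL.shutdownMiniCluster(); // closes the cluster connection, then stops HBase, DFS and ZooKeeper
      }
    }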
2024-11-19T12:44:59,413 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T12:44:59,413 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1575943754, stopped=false 2024-11-19T12:44:59,413 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=aba5a916dfea,40479,1732020294118 2024-11-19T12:44:59,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:44:59,466 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:44:59,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:44:59,466 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:44:59,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:59,466 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:59,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:59,466 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:59,466 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:44:59,466 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T12:44:59,466 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:44:59,467 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:44:59,467 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:44:59,467 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,39075,1732020295087' ***** 2024-11-19T12:44:59,467 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:44:59,467 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:44:59,467 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,32985,1732020295240' ***** 2024-11-19T12:44:59,467 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:44:59,467 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:44:59,467 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,39515,1732020295317' ***** 2024-11-19T12:44:59,467 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:44:59,467 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:44:59,467 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:44:59,467 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:44:59,468 INFO [RS:0;aba5a916dfea:39075 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:44:59,468 INFO [RS:2;aba5a916dfea:39515 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:44:59,468 INFO [RS:1;aba5a916dfea:32985 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:44:59,468 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:44:59,468 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:44:59,468 INFO [RS:0;aba5a916dfea:39075 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-19T12:44:59,468 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:44:59,468 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:44:59,468 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,39075,1732020295087 2024-11-19T12:44:59,468 INFO [RS:0;aba5a916dfea:39075 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:44:59,468 INFO [RS:2;aba5a916dfea:39515 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T12:44:59,468 INFO [RS:0;aba5a916dfea:39075 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;aba5a916dfea:39075. 2024-11-19T12:44:59,468 DEBUG [RS:0;aba5a916dfea:39075 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:44:59,468 DEBUG [RS:0;aba5a916dfea:39075 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:44:59,468 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(3091): Received CLOSE for d4cdd60b778d8af049d8064f056824a3 2024-11-19T12:44:59,469 INFO [RS:1;aba5a916dfea:32985 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T12:44:59,469 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,32985,1732020295240 2024-11-19T12:44:59,469 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,39075,1732020295087; all regions closed. 2024-11-19T12:44:59,469 INFO [RS:1;aba5a916dfea:32985 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:44:59,469 INFO [RS:1;aba5a916dfea:32985 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;aba5a916dfea:32985. 
2024-11-19T12:44:59,469 DEBUG [RS:1;aba5a916dfea:32985 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:44:59,469 DEBUG [RS:1;aba5a916dfea:32985 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:44:59,469 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,39515,1732020295317 2024-11-19T12:44:59,469 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,32985,1732020295240; all regions closed. 2024-11-19T12:44:59,469 INFO [RS:2;aba5a916dfea:39515 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:44:59,469 INFO [RS:2;aba5a916dfea:39515 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;aba5a916dfea:39515. 
2024-11-19T12:44:59,469 DEBUG [RS:2;aba5a916dfea:39515 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:44:59,469 DEBUG [RS:2;aba5a916dfea:39515 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:44:59,470 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T12:44:59,470 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T12:44:59,470 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T12:44:59,470 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T12:44:59,470 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d4cdd60b778d8af049d8064f056824a3, disabling compactions & flushes 2024-11-19T12:44:59,470 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 2024-11-19T12:44:59,470 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 2024-11-19T12:44:59,470 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. after waiting 0 ms 2024-11-19T12:44:59,470 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 
2024-11-19T12:44:59,471 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T12:44:59,471 DEBUG [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(1325): Online Regions={d4cdd60b778d8af049d8064f056824a3=TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T12:44:59,471 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:44:59,471 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:44:59,471 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:44:59,471 DEBUG [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d4cdd60b778d8af049d8064f056824a3 2024-11-19T12:44:59,471 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:44:59,471 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:44:59,471 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-19T12:44:59,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_1073741827_1017 (size=93) 2024-11-19T12:44:59,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38679 is added to blk_1073741827_1017 (size=93) 2024-11-19T12:44:59,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38679 is added to blk_1073741826_1016 (size=93) 2024-11-19T12:44:59,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_1073741826_1016 (size=93) 2024-11-19T12:44:59,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_1073741827_1017 (size=93) 2024-11-19T12:44:59,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_1073741826_1016 (size=93) 2024-11-19T12:44:59,489 DEBUG [RS:1;aba5a916dfea:32985 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/oldWALs 2024-11-19T12:44:59,489 DEBUG [RS:0;aba5a916dfea:39075 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/oldWALs 2024-11-19T12:44:59,489 INFO [RS:1;aba5a916dfea:32985 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL aba5a916dfea%2C32985%2C1732020295240:(num 1732020297091) 2024-11-19T12:44:59,489 INFO [RS:0;aba5a916dfea:39075 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL aba5a916dfea%2C39075%2C1732020295087:(num 1732020297091) 2024-11-19T12:44:59,489 DEBUG [RS:1;aba5a916dfea:32985 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:44:59,489 DEBUG [RS:0;aba5a916dfea:39075 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:44:59,489 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:44:59,489 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:44:59,489 INFO [RS:1;aba5a916dfea:32985 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:44:59,489 INFO [RS:0;aba5a916dfea:39075 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:44:59,490 INFO [RS:1;aba5a916dfea:32985 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T12:44:59,490 INFO [RS:0;aba5a916dfea:39075 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T12:44:59,490 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T12:44:59,490 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T12:44:59,490 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T12:44:59,490 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T12:44:59,490 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T12:44:59,490 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T12:44:59,490 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T12:44:59,490 INFO [RS:0;aba5a916dfea:39075 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:44:59,490 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T12:44:59,490 INFO [RS:1;aba5a916dfea:32985 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:44:59,490 INFO [RS:1;aba5a916dfea:32985 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32985 2024-11-19T12:44:59,490 INFO [RS:0;aba5a916dfea:39075 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39075 2024-11-19T12:44:59,496 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/default/TestHBaseWalOnEC/d4cdd60b778d8af049d8064f056824a3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-19T12:44:59,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,39075,1732020295087 2024-11-19T12:44:59,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:44:59,499 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 2024-11-19T12:44:59,499 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d4cdd60b778d8af049d8064f056824a3: Waiting for close lock at 1732020299469Running coprocessor pre-close hooks at 1732020299470 (+1 ms)Disabling compacts and flushes for region at 1732020299470Disabling writes for close at 1732020299470Writing region close event to WAL at 1732020299472 (+2 ms)Running coprocessor post-close hooks at 1732020299498 (+26 ms)Closed at 1732020299499 (+1 ms) 2024-11-19T12:44:59,500 INFO [RS:0;aba5a916dfea:39075 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:44:59,500 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,32985,1732020295240 2024-11-19T12:44:59,500 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3. 
2024-11-19T12:44:59,500 INFO [RS:1;aba5a916dfea:32985 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:44:59,500 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,32985,1732020295240] 2024-11-19T12:44:59,513 INFO [regionserver/aba5a916dfea:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:44:59,516 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,32985,1732020295240 already deleted, retry=false 2024-11-19T12:44:59,516 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,32985,1732020295240 expired; onlineServers=2 2024-11-19T12:44:59,516 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,39075,1732020295087] 2024-11-19T12:44:59,517 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/.tmp/info/3617480a9d644d5097d1a90494c74198 is 153, key is TestHBaseWalOnEC,,1732020298062.d4cdd60b778d8af049d8064f056824a3./info:regioninfo/1732020298532/Put/seqid=0 2024-11-19T12:44:59,519 INFO [regionserver/aba5a916dfea:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:44:59,520 INFO [regionserver/aba5a916dfea:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:44:59,521 WARN [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:59,521 WARN [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:59,524 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,39075,1732020295087 already deleted, retry=false 2024-11-19T12:44:59,524 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,39075,1732020295087 expired; onlineServers=1 2024-11-19T12:44:59,526 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_332967738_22 at /127.0.0.1:49284 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:36121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49284 dst: /127.0.0.1:36121 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:44:59,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-19T12:44:59,532 WARN [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-19T12:44:59,532 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/.tmp/info/3617480a9d644d5097d1a90494c74198 2024-11-19T12:44:59,565 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/.tmp/ns/feece5e193df4a9087bff648a3951637 is 43, key is default/ns:d/1732020297805/Put/seqid=0 2024-11-19T12:44:59,568 WARN [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:59,568 WARN [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:59,572 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_332967738_22 at /127.0.0.1:42988 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:44341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42988 dst: /127.0.0.1:44341 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:44:59,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-19T12:44:59,577 WARN [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-19T12:44:59,577 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/.tmp/ns/feece5e193df4a9087bff648a3951637 2024-11-19T12:44:59,605 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/.tmp/table/02cae1dc7d834ba58d1ee412fafd3b11 is 52, key is TestHBaseWalOnEC/table:state/1732020298551/Put/seqid=0 2024-11-19T12:44:59,607 WARN [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:59,608 WARN [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
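The repeated "Cannot allocate parity block" and "failed to write 2 blocks" warnings above are expected on this mini-cluster: the store files are written with the RS-3-2-1024k erasure coding policy, which needs 3 data plus 2 parity placements per block group, while only three datanodes are running here (127.0.0.1:36121, :38679 and :44341 in this log), so the two parity blocks have nowhere to go. Besides the 'hdfs ec -verifyClusterSetup' command the warning itself suggests, the same check can be done programmatically by comparing the policy's unit count with the number of live datanodes. A hedged sketch against the namenode address from this log follows; the directory path is hypothetical and this is not the test's own code.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class CheckEcPlacement {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44033"), conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          Path dir = new Path("/user/jenkins/test-data");  // hypothetical EC-enabled directory
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          int live = dfs.getDataNodeStats().length;
          if (policy == null) {
            System.out.println(dir + " is not erasure coded");
          } else {
            int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
            System.out.printf("%s needs %d datanodes per block group, cluster has %d live%n",
                policy.getName(), needed, live);
          }
        }
      }
    }

With RS-3-2-1024k and three datanodes this prints a requirement of 5 against 3 live nodes, which matches the two unplaceable parity blocks reported per block group above.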
2024-11-19T12:44:59,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:44:59,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39075-0x1015469e0fb0001, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:44:59,608 INFO [RS:0;aba5a916dfea:39075 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:44:59,609 INFO [RS:0;aba5a916dfea:39075 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,39075,1732020295087; zookeeper connection closed. 2024-11-19T12:44:59,609 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@387004aa {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@387004aa 2024-11-19T12:44:59,612 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_332967738_22 at /127.0.0.1:43004 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:44341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43004 dst: /127.0.0.1:44341 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:44:59,616 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:44:59,616 INFO [RS:1;aba5a916dfea:32985 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:44:59,616 INFO [RS:1;aba5a916dfea:32985 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,32985,1732020295240; zookeeper connection closed. 
2024-11-19T12:44:59,616 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32985-0x1015469e0fb0002, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:44:59,617 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@28e7c52e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@28e7c52e 2024-11-19T12:44:59,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-19T12:44:59,621 WARN [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-19T12:44:59,621 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/.tmp/table/02cae1dc7d834ba58d1ee412fafd3b11 2024-11-19T12:44:59,633 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/.tmp/info/3617480a9d644d5097d1a90494c74198 as hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/info/3617480a9d644d5097d1a90494c74198 2024-11-19T12:44:59,644 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/info/3617480a9d644d5097d1a90494c74198, entries=10, sequenceid=11, filesize=6.5 K 2024-11-19T12:44:59,647 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/.tmp/ns/feece5e193df4a9087bff648a3951637 as hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/ns/feece5e193df4a9087bff648a3951637 2024-11-19T12:44:59,659 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/ns/feece5e193df4a9087bff648a3951637, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T12:44:59,661 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/.tmp/table/02cae1dc7d834ba58d1ee412fafd3b11 as hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/table/02cae1dc7d834ba58d1ee412fafd3b11 2024-11-19T12:44:59,671 DEBUG [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T12:44:59,674 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/table/02cae1dc7d834ba58d1ee412fafd3b11, entries=2, sequenceid=11, filesize=5.1 K 2024-11-19T12:44:59,676 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 205ms, sequenceid=11, compaction requested=false 2024-11-19T12:44:59,676 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T12:44:59,697 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T12:44:59,699 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:44:59,699 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:44:59,699 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020299471Running coprocessor pre-close hooks at 1732020299471Disabling compacts and flushes for region at 1732020299471Disabling writes for close at 1732020299471Obtaining lock to block concurrent updates at 1732020299471Preparing flush snapshotting stores in 1588230740 at 1732020299471Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1732020299473 (+2 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732020299479 (+6 ms)Flushing 1588230740/info: creating writer at 1732020299479Flushing 1588230740/info: appending metadata at 1732020299511 (+32 ms)Flushing 1588230740/info: closing flushed file at 1732020299511Flushing 1588230740/ns: creating writer at 1732020299544 (+33 ms)Flushing 1588230740/ns: appending metadata at 1732020299563 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1732020299563Flushing 1588230740/table: creating writer at 1732020299587 (+24 ms)Flushing 1588230740/table: appending metadata at 1732020299604 (+17 ms)Flushing 1588230740/table: closing flushed file at 1732020299604Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@304aa4d1: reopening flushed file at 1732020299631 (+27 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7850c183: reopening flushed file at 1732020299645 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6dc48082: reopening flushed file at 1732020299659 (+14 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 205ms, sequenceid=11, compaction requested=false at 1732020299676 (+17 ms)Writing region close event to WAL at 1732020299687 (+11 ms)Running coprocessor post-close hooks at 1732020299698 (+11 ms)Closed at 1732020299699 (+1 ms) 2024-11-19T12:44:59,699 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): 
Closed hbase:meta,,1.1588230740 2024-11-19T12:44:59,872 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,39515,1732020295317; all regions closed. 2024-11-19T12:44:59,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_1073741829_1019 (size=2751) 2024-11-19T12:44:59,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38679 is added to blk_1073741829_1019 (size=2751) 2024-11-19T12:44:59,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_1073741829_1019 (size=2751) 2024-11-19T12:44:59,879 DEBUG [RS:2;aba5a916dfea:39515 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/oldWALs 2024-11-19T12:44:59,879 INFO [RS:2;aba5a916dfea:39515 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL aba5a916dfea%2C39515%2C1732020295317.meta:.meta(num 1732020297554) 2024-11-19T12:44:59,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_1073741828_1018 (size=1298) 2024-11-19T12:44:59,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_1073741828_1018 (size=1298) 2024-11-19T12:44:59,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38679 is added to blk_1073741828_1018 (size=1298) 2024-11-19T12:44:59,886 DEBUG [RS:2;aba5a916dfea:39515 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/oldWALs 2024-11-19T12:44:59,887 INFO [RS:2;aba5a916dfea:39515 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL aba5a916dfea%2C39515%2C1732020295317:(num 1732020297091) 2024-11-19T12:44:59,887 DEBUG [RS:2;aba5a916dfea:39515 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:44:59,887 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:44:59,887 INFO [RS:2;aba5a916dfea:39515 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:44:59,887 INFO [RS:2;aba5a916dfea:39515 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T12:44:59,887 INFO [RS:2;aba5a916dfea:39515 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:44:59,887 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T12:44:59,888 INFO [RS:2;aba5a916dfea:39515 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39515 2024-11-19T12:44:59,907 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,39515,1732020295317 2024-11-19T12:44:59,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:44:59,907 INFO [RS:2;aba5a916dfea:39515 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:44:59,915 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,39515,1732020295317] 2024-11-19T12:44:59,924 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,39515,1732020295317 already deleted, retry=false 2024-11-19T12:44:59,924 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,39515,1732020295317 expired; onlineServers=0 2024-11-19T12:44:59,924 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'aba5a916dfea,40479,1732020294118' ***** 2024-11-19T12:44:59,924 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T12:44:59,924 INFO [M:0;aba5a916dfea:40479 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:44:59,924 INFO [M:0;aba5a916dfea:40479 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:44:59,925 DEBUG [M:0;aba5a916dfea:40479 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T12:44:59,925 DEBUG [M:0;aba5a916dfea:40479 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T12:44:59,925 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T12:44:59,925 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020296687 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020296687,5,FailOnTimeoutGroup] 2024-11-19T12:44:59,925 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020296678 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020296678,5,FailOnTimeoutGroup] 2024-11-19T12:44:59,925 INFO [M:0;aba5a916dfea:40479 {}] hbase.ChoreService(370): Chore service for: master/aba5a916dfea:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T12:44:59,925 INFO [M:0;aba5a916dfea:40479 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:44:59,925 DEBUG [M:0;aba5a916dfea:40479 {}] master.HMaster(1795): Stopping service threads 2024-11-19T12:44:59,925 INFO [M:0;aba5a916dfea:40479 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T12:44:59,926 INFO [M:0;aba5a916dfea:40479 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:44:59,926 INFO [M:0;aba5a916dfea:40479 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T12:44:59,926 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T12:44:59,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T12:44:59,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:44:59,932 DEBUG [M:0;aba5a916dfea:40479 {}] zookeeper.ZKUtil(347): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T12:44:59,932 WARN [M:0;aba5a916dfea:40479 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T12:44:59,933 INFO [M:0;aba5a916dfea:40479 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/.lastflushedseqids 2024-11-19T12:44:59,947 WARN [M:0;aba5a916dfea:40479 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:59,947 WARN [M:0;aba5a916dfea:40479 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-19T12:44:59,954 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_165084278_22 at /127.0.0.1:43124 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:38679:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43124 dst: /127.0.0.1:38679 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:44:59,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38679 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-19T12:44:59,963 WARN [M:0;aba5a916dfea:40479 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-19T12:44:59,963 INFO [M:0;aba5a916dfea:40479 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T12:44:59,964 INFO [M:0;aba5a916dfea:40479 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T12:44:59,964 DEBUG [M:0;aba5a916dfea:40479 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:44:59,964 INFO [M:0;aba5a916dfea:40479 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:44:59,964 DEBUG [M:0;aba5a916dfea:40479 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:44:59,964 DEBUG [M:0;aba5a916dfea:40479 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:44:59,964 DEBUG [M:0;aba5a916dfea:40479 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T12:44:59,964 INFO [M:0;aba5a916dfea:40479 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-19T12:44:59,990 DEBUG [M:0;aba5a916dfea:40479 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/143820ece8914485b1804370f982bfd0 is 82, key is hbase:meta,,1/info:regioninfo/1732020297673/Put/seqid=0 2024-11-19T12:44:59,993 WARN [M:0;aba5a916dfea:40479 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:59,993 WARN [M:0;aba5a916dfea:40479 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:44:59,999 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_165084278_22 at /127.0.0.1:43020 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:44341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43020 dst: /127.0.0.1:44341 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:45:00,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-19T12:45:00,003 WARN [M:0;aba5a916dfea:40479 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-19T12:45:00,003 INFO [M:0;aba5a916dfea:40479 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/143820ece8914485b1804370f982bfd0 2024-11-19T12:45:00,016 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:45:00,016 INFO [RS:2;aba5a916dfea:39515 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:45:00,016 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39515-0x1015469e0fb0003, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:45:00,016 INFO [RS:2;aba5a916dfea:39515 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,39515,1732020295317; zookeeper connection closed. 2024-11-19T12:45:00,016 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1492eb48 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1492eb48 2024-11-19T12:45:00,016 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-19T12:45:00,041 DEBUG [M:0;aba5a916dfea:40479 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/324d4c21696c4bec8ac40d9d573fca60 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732020298559/Put/seqid=0 2024-11-19T12:45:00,044 WARN [M:0;aba5a916dfea:40479 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:45:00,044 WARN [M:0;aba5a916dfea:40479 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:45:00,052 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_165084278_22 at /127.0.0.1:49296 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:36121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49296 dst: /127.0.0.1:36121 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:45:00,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_-9223372036854775552_1037 (size=6440) 2024-11-19T12:45:00,059 WARN [M:0;aba5a916dfea:40479 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-19T12:45:00,059 INFO [M:0;aba5a916dfea:40479 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/324d4c21696c4bec8ac40d9d573fca60 2024-11-19T12:45:00,089 DEBUG [M:0;aba5a916dfea:40479 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e4746fecc854e3a90e6776881f98c7c is 69, key is aba5a916dfea,32985,1732020295240/rs:state/1732020296718/Put/seqid=0 2024-11-19T12:45:00,091 WARN [M:0;aba5a916dfea:40479 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:45:00,092 WARN [M:0;aba5a916dfea:40479 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-19T12:45:00,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_165084278_22 at /127.0.0.1:49328 [Receiving block BP-1627058744-172.17.0.2-1732020289496:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:36121:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49328 dst: /127.0.0.1:36121 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:45:00,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-19T12:45:00,099 WARN [M:0;aba5a916dfea:40479 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-19T12:45:00,100 INFO [M:0;aba5a916dfea:40479 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e4746fecc854e3a90e6776881f98c7c 2024-11-19T12:45:00,112 DEBUG [M:0;aba5a916dfea:40479 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/143820ece8914485b1804370f982bfd0 as hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/143820ece8914485b1804370f982bfd0 2024-11-19T12:45:00,119 INFO [M:0;aba5a916dfea:40479 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/143820ece8914485b1804370f982bfd0, entries=8, sequenceid=72, filesize=5.5 K 2024-11-19T12:45:00,121 DEBUG [M:0;aba5a916dfea:40479 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/324d4c21696c4bec8ac40d9d573fca60 as hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/324d4c21696c4bec8ac40d9d573fca60 2024-11-19T12:45:00,131 INFO [M:0;aba5a916dfea:40479 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/324d4c21696c4bec8ac40d9d573fca60, entries=8, sequenceid=72, filesize=6.3 K 2024-11-19T12:45:00,133 DEBUG [M:0;aba5a916dfea:40479 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e4746fecc854e3a90e6776881f98c7c as hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1e4746fecc854e3a90e6776881f98c7c 2024-11-19T12:45:00,141 INFO [M:0;aba5a916dfea:40479 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1e4746fecc854e3a90e6776881f98c7c, entries=3, sequenceid=72, filesize=5.2 K 2024-11-19T12:45:00,143 INFO [M:0;aba5a916dfea:40479 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 179ms, sequenceid=72, compaction requested=false 2024-11-19T12:45:00,145 INFO [M:0;aba5a916dfea:40479 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:45:00,146 DEBUG [M:0;aba5a916dfea:40479 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020299964Disabling compacts and flushes for region at 1732020299964Disabling writes for close at 1732020299964Obtaining lock to block concurrent updates at 1732020299964Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732020299965 (+1 ms)Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1732020299965Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732020299966 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732020299967 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732020299989 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732020299989Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732020300014 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732020300041 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732020300041Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732020300067 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732020300088 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732020300088Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@729dbc20: reopening flushed file at 1732020300110 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@553f5bb1: reopening flushed file at 1732020300119 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a62224b: reopening flushed file at 1732020300131 (+12 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 179ms, sequenceid=72, compaction requested=false at 1732020300143 (+12 ms)Writing region close event to WAL at 1732020300145 (+2 ms)Closed at 1732020300145 2024-11-19T12:45:00,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36121 is added to blk_1073741825_1011 (size=32686) 2024-11-19T12:45:00,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38679 is added to blk_1073741825_1011 (size=32686) 2024-11-19T12:45:00,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44341 is added to blk_1073741825_1011 (size=32686) 2024-11-19T12:45:00,150 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
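The "Committing .../.tmp/... as .../info/..." lines above reflect the usual HDFS pattern of writing a new store file to a temporary location and then renaming it into its final place, so readers only ever observe complete files. A minimal, generic sketch of that pattern with the plain FileSystem API is shown below; the paths are hypothetical and this is not HBase's HRegionFileSystem implementation.

    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenRename {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44033"), conf)) {
          Path tmp = new Path("/demo/.tmp/part-0");   // hypothetical temporary location
          Path dst = new Path("/demo/part-0");        // final location
          fs.mkdirs(tmp.getParent());
          try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("example payload".getBytes(StandardCharsets.UTF_8));
          }
          // Rename within a single HDFS namespace is atomic, so the file only
          // appears at its final path once it has been fully written and closed.
          if (!fs.rename(tmp, dst)) {
            throw new IllegalStateException("rename failed: " + tmp + " -> " + dst);
          }
        }
      }
    }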
2024-11-19T12:45:00,151 INFO [M:0;aba5a916dfea:40479 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T12:45:00,151 INFO [M:0;aba5a916dfea:40479 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40479 2024-11-19T12:45:00,151 INFO [M:0;aba5a916dfea:40479 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:45:00,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:45:00,280 INFO [M:0;aba5a916dfea:40479 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:45:00,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40479-0x1015469e0fb0000, quorum=127.0.0.1:56865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:45:00,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2a9ecb50{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:45:00,317 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4067fd8a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:45:00,317 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:45:00,317 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@146c020c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:45:00,317 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@233bb3ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/hadoop.log.dir/,STOPPED} 2024-11-19T12:45:00,321 WARN [BP-1627058744-172.17.0.2-1732020289496 heartbeating to localhost/127.0.0.1:44033 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:45:00,321 WARN [BP-1627058744-172.17.0.2-1732020289496 heartbeating to localhost/127.0.0.1:44033 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1627058744-172.17.0.2-1732020289496 (Datanode Uuid bed16c64-0153-4447-a8cf-a6c16b525072) service to localhost/127.0.0.1:44033 2024-11-19T12:45:00,322 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/data/data5/current/BP-1627058744-172.17.0.2-1732020289496 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:45:00,322 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/data/data6/current/BP-1627058744-172.17.0.2-1732020289496 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:45:00,322 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered 
interrupt and exit. 2024-11-19T12:45:00,322 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:45:00,323 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:45:00,331 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14402056{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:45:00,331 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e1cb3ec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:45:00,331 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:45:00,332 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65cd6e19{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:45:00,332 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5435fd88{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/hadoop.log.dir/,STOPPED} 2024-11-19T12:45:00,333 WARN [BP-1627058744-172.17.0.2-1732020289496 heartbeating to localhost/127.0.0.1:44033 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:45:00,333 WARN [BP-1627058744-172.17.0.2-1732020289496 heartbeating to localhost/127.0.0.1:44033 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1627058744-172.17.0.2-1732020289496 (Datanode Uuid df794f08-e502-4164-a31a-dcfc4e2d6194) service to localhost/127.0.0.1:44033 2024-11-19T12:45:00,334 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/data/data3/current/BP-1627058744-172.17.0.2-1732020289496 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:45:00,334 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/data/data4/current/BP-1627058744-172.17.0.2-1732020289496 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:45:00,334 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:45:00,334 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:45:00,335 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:45:00,344 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b23cf15{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:45:00,344 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f9e5902{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:45:00,344 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:45:00,345 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b3a0659{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:45:00,345 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3665148e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/hadoop.log.dir/,STOPPED} 2024-11-19T12:45:00,346 WARN [BP-1627058744-172.17.0.2-1732020289496 heartbeating to localhost/127.0.0.1:44033 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:45:00,346 WARN [BP-1627058744-172.17.0.2-1732020289496 heartbeating to localhost/127.0.0.1:44033 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1627058744-172.17.0.2-1732020289496 (Datanode Uuid 44509b0b-7f0c-4d2e-8f17-d0d0a3f1db0b) service to localhost/127.0.0.1:44033 2024-11-19T12:45:00,347 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/data/data1/current/BP-1627058744-172.17.0.2-1732020289496 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:45:00,347 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/cluster_9d93da93-c9ba-544b-3001-3bf8a224a26e/data/data2/current/BP-1627058744-172.17.0.2-1732020289496 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:45:00,348 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:45:00,348 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:45:00,348 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:45:00,358 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62d6efd9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:45:00,358 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:45:00,359 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:45:00,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:45:00,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/hadoop.log.dir/,STOPPED} 2024-11-19T12:45:00,368 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T12:45:00,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T12:45:00,411 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=85 (was 157), OpenFileDescriptor=443 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=312 (was 313), ProcessCount=11 (was 11), AvailableMemoryMB=6614 (was 7124) 2024-11-19T12:45:00,417 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=85, OpenFileDescriptor=443, MaxFileDescriptor=1048576, SystemLoadAverage=312, ProcessCount=11, AvailableMemoryMB=6613 2024-11-19T12:45:00,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T12:45:00,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/hadoop.log.dir so I do NOT create it in target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd 2024-11-19T12:45:00,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1913be47-5358-ff7c-2f97-6096e5f45004/hadoop.tmp.dir so I do NOT create it in target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd 2024-11-19T12:45:00,418 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374, deleteOnExit=true 2024-11-19T12:45:00,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T12:45:00,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/test.cache.data in system properties and HBase conf 2024-11-19T12:45:00,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T12:45:00,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/hadoop.log.dir in system properties and HBase conf 2024-11-19T12:45:00,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T12:45:00,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T12:45:00,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T12:45:00,419 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T12:45:00,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:45:00,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:45:00,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T12:45:00,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:45:00,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T12:45:00,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T12:45:00,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:45:00,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:45:00,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T12:45:00,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/nfs.dump.dir in system properties and HBase conf 2024-11-19T12:45:00,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/java.io.tmpdir in system properties and HBase conf 2024-11-19T12:45:00,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:45:00,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T12:45:00,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T12:45:00,729 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:45:00,735 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:45:00,743 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:45:00,743 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:45:00,743 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:45:00,744 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:45:00,745 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51dee452{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:45:00,745 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ed1f55a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:45:00,870 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a400a96{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/java.io.tmpdir/jetty-localhost-46039-hadoop-hdfs-3_4_1-tests_jar-_-any-3729698389943131868/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:45:00,871 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4072566{HTTP/1.1, (http/1.1)}{localhost:46039} 2024-11-19T12:45:00,871 INFO [Time-limited test {}] server.Server(415): Started @13543ms 2024-11-19T12:45:01,100 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:45:01,105 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:45:01,110 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:45:01,110 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:45:01,110 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:45:01,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47030e3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:45:01,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@443cc38c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:45:01,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@381275e5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/java.io.tmpdir/jetty-localhost-43825-hadoop-hdfs-3_4_1-tests_jar-_-any-9561339919823538562/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:45:01,213 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@68e19264{HTTP/1.1, (http/1.1)}{localhost:43825} 2024-11-19T12:45:01,213 INFO [Time-limited test {}] server.Server(415): Started @13885ms 2024-11-19T12:45:01,215 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:45:01,251 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:45:01,256 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:45:01,259 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:45:01,259 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:45:01,259 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:45:01,260 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42c6a290{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:45:01,261 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18d7f435{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:45:01,378 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@230b47a5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/java.io.tmpdir/jetty-localhost-42061-hadoop-hdfs-3_4_1-tests_jar-_-any-7745651398304141731/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:45:01,378 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@72785dee{HTTP/1.1, (http/1.1)}{localhost:42061} 2024-11-19T12:45:01,378 INFO [Time-limited test {}] server.Server(415): Started @14050ms 2024-11-19T12:45:01,380 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:45:01,414 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:45:01,419 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:45:01,420 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:45:01,420 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:45:01,420 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:45:01,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49c714b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:45:01,421 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@736fee86{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:45:01,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@aa26abb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/java.io.tmpdir/jetty-localhost-44301-hadoop-hdfs-3_4_1-tests_jar-_-any-2057933183066635002/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:45:01,528 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c97cc8b{HTTP/1.1, (http/1.1)}{localhost:44301} 2024-11-19T12:45:01,528 INFO [Time-limited test {}] server.Server(415): Started @14199ms 2024-11-19T12:45:01,529 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:45:01,993 WARN [Thread-559 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/data/data1/current/BP-542822039-172.17.0.2-1732020300446/current, will proceed with Du for space computation calculation, 2024-11-19T12:45:01,996 WARN [Thread-560 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/data/data2/current/BP-542822039-172.17.0.2-1732020300446/current, will proceed with Du for space computation calculation, 2024-11-19T12:45:02,015 WARN [Thread-501 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T12:45:02,018 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3531034e207d1779 with lease ID 0xda6d08d2c2297341: Processing first storage report for DS-bb44c41d-c1cb-4667-9130-c7785fdc5f55 from datanode DatanodeRegistration(127.0.0.1:38635, datanodeUuid=7e925ba3-ea8c-4c13-b4d4-2806222a4a22, infoPort=39607, infoSecurePort=0, ipcPort=42847, storageInfo=lv=-57;cid=testClusterID;nsid=1947402413;c=1732020300446) 2024-11-19T12:45:02,018 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3531034e207d1779 with lease ID 0xda6d08d2c2297341: from storage DS-bb44c41d-c1cb-4667-9130-c7785fdc5f55 node DatanodeRegistration(127.0.0.1:38635, datanodeUuid=7e925ba3-ea8c-4c13-b4d4-2806222a4a22, infoPort=39607, infoSecurePort=0, ipcPort=42847, storageInfo=lv=-57;cid=testClusterID;nsid=1947402413;c=1732020300446), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:45:02,018 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3531034e207d1779 with lease ID 0xda6d08d2c2297341: Processing first storage report for DS-4a3250f2-3869-4579-b35b-2faccf59d6ee from datanode DatanodeRegistration(127.0.0.1:38635, datanodeUuid=7e925ba3-ea8c-4c13-b4d4-2806222a4a22, infoPort=39607, infoSecurePort=0, ipcPort=42847, storageInfo=lv=-57;cid=testClusterID;nsid=1947402413;c=1732020300446) 2024-11-19T12:45:02,018 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3531034e207d1779 with lease ID 0xda6d08d2c2297341: from storage DS-4a3250f2-3869-4579-b35b-2faccf59d6ee node DatanodeRegistration(127.0.0.1:38635, datanodeUuid=7e925ba3-ea8c-4c13-b4d4-2806222a4a22, infoPort=39607, infoSecurePort=0, ipcPort=42847, storageInfo=lv=-57;cid=testClusterID;nsid=1947402413;c=1732020300446), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:45:02,278 WARN [Thread-573 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/data/data4/current/BP-542822039-172.17.0.2-1732020300446/current, will proceed with Du for space computation calculation, 2024-11-19T12:45:02,278 WARN [Thread-572 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/data/data3/current/BP-542822039-172.17.0.2-1732020300446/current, will proceed with Du for space computation calculation, 2024-11-19T12:45:02,300 WARN [Thread-524 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T12:45:02,303 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15d101e1329f2afe with lease ID 0xda6d08d2c2297342: Processing first storage report for DS-6d25af2d-cce5-4d0a-a444-ec1d047aeaba from datanode DatanodeRegistration(127.0.0.1:38931, datanodeUuid=b4646c5e-69ff-4792-9bdd-6a9a6cea4661, infoPort=45517, infoSecurePort=0, ipcPort=45735, storageInfo=lv=-57;cid=testClusterID;nsid=1947402413;c=1732020300446) 2024-11-19T12:45:02,303 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15d101e1329f2afe with lease ID 0xda6d08d2c2297342: from storage DS-6d25af2d-cce5-4d0a-a444-ec1d047aeaba node DatanodeRegistration(127.0.0.1:38931, datanodeUuid=b4646c5e-69ff-4792-9bdd-6a9a6cea4661, infoPort=45517, infoSecurePort=0, ipcPort=45735, storageInfo=lv=-57;cid=testClusterID;nsid=1947402413;c=1732020300446), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:45:02,303 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15d101e1329f2afe with lease ID 0xda6d08d2c2297342: Processing first storage report for DS-b43c04c2-1997-4280-9f6f-0b755755c97b from datanode DatanodeRegistration(127.0.0.1:38931, datanodeUuid=b4646c5e-69ff-4792-9bdd-6a9a6cea4661, infoPort=45517, infoSecurePort=0, ipcPort=45735, storageInfo=lv=-57;cid=testClusterID;nsid=1947402413;c=1732020300446) 2024-11-19T12:45:02,303 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15d101e1329f2afe with lease ID 0xda6d08d2c2297342: from storage DS-b43c04c2-1997-4280-9f6f-0b755755c97b node DatanodeRegistration(127.0.0.1:38931, datanodeUuid=b4646c5e-69ff-4792-9bdd-6a9a6cea4661, infoPort=45517, infoSecurePort=0, ipcPort=45735, storageInfo=lv=-57;cid=testClusterID;nsid=1947402413;c=1732020300446), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T12:45:02,391 WARN [Thread-583 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/data/data5/current/BP-542822039-172.17.0.2-1732020300446/current, will proceed with Du for space computation calculation, 2024-11-19T12:45:02,397 WARN [Thread-584 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/data/data6/current/BP-542822039-172.17.0.2-1732020300446/current, will proceed with Du for space computation calculation, 2024-11-19T12:45:02,423 WARN [Thread-547 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T12:45:02,426 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5dc321c27c922129 with lease ID 0xda6d08d2c2297343: Processing first storage report for DS-4cc791b5-0d3b-404d-96bc-b5c7c93bc5bd from datanode DatanodeRegistration(127.0.0.1:37409, datanodeUuid=9c21a45e-ffa4-4275-a1a0-f79176683b36, infoPort=33137, infoSecurePort=0, ipcPort=42561, storageInfo=lv=-57;cid=testClusterID;nsid=1947402413;c=1732020300446) 2024-11-19T12:45:02,426 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5dc321c27c922129 with lease ID 0xda6d08d2c2297343: from storage DS-4cc791b5-0d3b-404d-96bc-b5c7c93bc5bd node DatanodeRegistration(127.0.0.1:37409, datanodeUuid=9c21a45e-ffa4-4275-a1a0-f79176683b36, infoPort=33137, infoSecurePort=0, ipcPort=42561, storageInfo=lv=-57;cid=testClusterID;nsid=1947402413;c=1732020300446), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:45:02,426 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5dc321c27c922129 with lease ID 0xda6d08d2c2297343: Processing first storage report for DS-7b253abe-3be4-463f-af6e-61c0858f21e3 from datanode DatanodeRegistration(127.0.0.1:37409, datanodeUuid=9c21a45e-ffa4-4275-a1a0-f79176683b36, infoPort=33137, infoSecurePort=0, ipcPort=42561, storageInfo=lv=-57;cid=testClusterID;nsid=1947402413;c=1732020300446) 2024-11-19T12:45:02,426 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5dc321c27c922129 with lease ID 0xda6d08d2c2297343: from storage DS-7b253abe-3be4-463f-af6e-61c0858f21e3 node DatanodeRegistration(127.0.0.1:37409, datanodeUuid=9c21a45e-ffa4-4275-a1a0-f79176683b36, infoPort=33137, infoSecurePort=0, ipcPort=42561, storageInfo=lv=-57;cid=testClusterID;nsid=1947402413;c=1732020300446), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:45:02,477 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd 2024-11-19T12:45:02,484 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/zookeeper_0, clientPort=55862, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T12:45:02,485 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55862 2024-11-19T12:45:02,485 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:02,487 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:02,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:45:02,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:45:02,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:45:02,508 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0 with version=8 2024-11-19T12:45:02,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44033/user/jenkins/test-data/462b85e5-3bd6-25b7-7743-cf7d8d82e30b/hbase-staging 2024-11-19T12:45:02,510 INFO [Time-limited test {}] client.ConnectionUtils(128): master/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:45:02,510 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:02,510 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:02,510 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:45:02,510 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:02,510 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:45:02,511 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T12:45:02,511 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:45:02,512 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40709 2024-11-19T12:45:02,514 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40709 connecting to ZooKeeper ensemble=127.0.0.1:55862 2024-11-19T12:45:02,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:407090x0, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:45:02,565 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40709-0x101546a05550000 connected 2024-11-19T12:45:02,658 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:02,659 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:02,661 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:45:02,661 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0, hbase.cluster.distributed=false 2024-11-19T12:45:02,663 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:45:02,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40709 2024-11-19T12:45:02,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40709 2024-11-19T12:45:02,669 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40709 2024-11-19T12:45:02,669 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40709 2024-11-19T12:45:02,669 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40709 2024-11-19T12:45:02,685 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:45:02,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:02,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:02,686 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:45:02,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:02,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:45:02,686 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T12:45:02,686 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:45:02,687 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38855 2024-11-19T12:45:02,688 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38855 connecting to ZooKeeper ensemble=127.0.0.1:55862 2024-11-19T12:45:02,690 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:02,693 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:02,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:388550x0, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:45:02,708 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38855-0x101546a05550001 connected 2024-11-19T12:45:02,708 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:45:02,708 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T12:45:02,710 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T12:45:02,711 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T12:45:02,712 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:45:02,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38855 2024-11-19T12:45:02,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38855 2024-11-19T12:45:02,721 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38855 2024-11-19T12:45:02,722 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38855 2024-11-19T12:45:02,722 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38855 2024-11-19T12:45:02,738 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:45:02,738 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:02,738 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:02,738 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:45:02,738 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:02,738 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:45:02,738 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T12:45:02,738 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:45:02,739 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41695 2024-11-19T12:45:02,740 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41695 connecting to ZooKeeper ensemble=127.0.0.1:55862 2024-11-19T12:45:02,741 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:02,743 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:02,757 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:416950x0, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:45:02,757 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:416950x0, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:45:02,757 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41695-0x101546a05550002 connected 2024-11-19T12:45:02,758 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T12:45:02,762 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T12:45:02,763 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T12:45:02,764 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:45:02,767 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41695 2024-11-19T12:45:02,767 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41695 2024-11-19T12:45:02,770 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41695 2024-11-19T12:45:02,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41695 2024-11-19T12:45:02,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41695 2024-11-19T12:45:02,792 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:45:02,792 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:02,792 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:02,792 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:45:02,792 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:02,792 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:45:02,792 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T12:45:02,792 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:45:02,795 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44751 2024-11-19T12:45:02,797 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44751 connecting to ZooKeeper ensemble=127.0.0.1:55862 2024-11-19T12:45:02,797 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:02,800 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:02,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:447510x0, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:45:02,816 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:447510x0, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:45:02,816 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T12:45:02,818 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44751-0x101546a05550003 connected 2024-11-19T12:45:02,819 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T12:45:02,820 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T12:45:02,822 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:45:02,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44751 2024-11-19T12:45:02,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44751 2024-11-19T12:45:02,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44751 2024-11-19T12:45:02,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44751 2024-11-19T12:45:02,834 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44751 2024-11-19T12:45:02,847 DEBUG [M:0;aba5a916dfea:40709 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;aba5a916dfea:40709 2024-11-19T12:45:02,847 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/aba5a916dfea,40709,1732020302510 2024-11-19T12:45:02,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:45:02,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:45:02,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:45:02,856 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:45:02,857 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/aba5a916dfea,40709,1732020302510 2024-11-19T12:45:02,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:45:02,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:45:02,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:02,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, 
quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:02,865 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:45:02,865 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:02,866 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T12:45:02,866 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/aba5a916dfea,40709,1732020302510 from backup master directory 2024-11-19T12:45:02,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:02,873 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:45:02,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:45:02,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/aba5a916dfea,40709,1732020302510 2024-11-19T12:45:02,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:45:02,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:45:02,873 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
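
The entries above show the master and each region server registering ZooKeeper watches on znodes such as /hbase/running and /hbase/master before those znodes exist, and then receiving NodeCreated / NodeDeleted / NodeChildrenChanged events for them. A minimal sketch of that primitive with the plain ZooKeeper client API (the ensemble address and znode paths are taken from the log above; the class name and everything else here is illustrative, not HBase's ZKWatcher/ZKUtil code):

    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Watcher callback: receives the event types seen in the log
        // (None/SyncConnected, NodeCreated, NodeDeleted, NodeChildrenChanged).
        Watcher watcher = event ->
            System.out.println("type=" + event.getType() + ", path=" + event.getPath());

        ZooKeeper zk = new ZooKeeper("127.0.0.1:55862", 30_000, watcher);

        // exists() with watch=true registers a one-shot watch even when the znode
        // is absent -- the "Set watcher on znode that does not yet exist" case above.
        zk.exists("/hbase/running", true);
        zk.exists("/hbase/master", true);

        Thread.sleep(60_000);  // keep the session open long enough to observe events
        zk.close();
      }
    }

HBase wraps this primitive in the RecoverableZooKeeper and ZKWatcher classes named in the log, which add retries and event dispatch, but the watch mechanics are the same.
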
2024-11-19T12:45:02,873 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=aba5a916dfea,40709,1732020302510 2024-11-19T12:45:02,880 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/hbase.id] with ID: 7b975329-423d-4ab7-b8ca-2638e88e2a7d 2024-11-19T12:45:02,880 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/.tmp/hbase.id 2024-11-19T12:45:02,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:45:02,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:45:02,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:45:02,893 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/.tmp/hbase.id]:[hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/hbase.id] 2024-11-19T12:45:02,912 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:02,912 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T12:45:02,914 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
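
The cluster ID sequence above (write the ID to .tmp/hbase.id, then move it to hbase.id) is the usual write-to-temp-then-rename pattern for publishing a small file on HDFS without readers ever seeing a partial write. A rough sketch with the stock Hadoop FileSystem API, assuming only the NameNode address and cluster ID shown in the log; the root directory, class name, and error handling are made up for illustration and this is not the actual FSUtils code:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:38985");    // NameNode from the log
        try (FileSystem fs = FileSystem.get(conf)) {
          Path rootDir = new Path("/user/jenkins/demo-root");  // illustrative root dir
          Path tmpId   = new Path(rootDir, ".tmp/hbase.id");
          Path realId  = new Path(rootDir, "hbase.id");

          // 1. Write the ID into a temporary location first ...
          try (FSDataOutputStream out = fs.create(tmpId, true)) {
            out.write("7b975329-423d-4ab7-b8ca-2638e88e2a7d"
                .getBytes(StandardCharsets.UTF_8));
          }
          // 2. ... then rename it into its target location in one step.
          if (!fs.rename(tmpId, realId)) {
            throw new IOException("rename failed: " + tmpId + " -> " + realId);
          }
        }
      }
    }
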
2024-11-19T12:45:02,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:02,923 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:02,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:02,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:02,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:45:02,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:45:02,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:45:02,938 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:45:02,939 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T12:45:02,940 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:45:02,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is 
added to blk_1073741828_1004 (size=1189) 2024-11-19T12:45:02,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:45:02,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:45:02,957 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store 2024-11-19T12:45:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:45:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:45:02,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:45:02,969 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:45:02,969 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:45:02,969 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:45:02,969 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
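
The 'master:store' descriptor logged above (an in-memory 'info' family with three versions, ROWCOL bloom filter, ROW_INDEX_V1 encoding and an 8 KB block size, plus single-version 'proc'/'rs'/'state' families) can be approximated with the public descriptor builders. A sketch, assuming a plain user table name since master:store itself is an internal region; only attributes shown in the log are set and everything else is left at defaults:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreLikeDescriptorSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_store"))
            // 'info': 3 versions, ROWCOL bloom, ROW_INDEX_V1 encoding, 8 KB blocks, in-memory
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .setInMemory(true)
                .build())
            // 'proc': single version, ROW bloom, default block size
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .build())
            .build();
      }
    }

Passing such a descriptor to Admin.createTable would create an ordinary table with these settings; the master store region in the log is created internally from an equivalent descriptor.
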
2024-11-19T12:45:02,969 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:45:02,969 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:45:02,969 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:45:02,969 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020302969Disabling compacts and flushes for region at 1732020302969Disabling writes for close at 1732020302969Writing region close event to WAL at 1732020302969Closed at 1732020302969 2024-11-19T12:45:02,971 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/.initializing 2024-11-19T12:45:02,971 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/WALs/aba5a916dfea,40709,1732020302510 2024-11-19T12:45:02,975 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C40709%2C1732020302510, suffix=, logDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/WALs/aba5a916dfea,40709,1732020302510, archiveDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/oldWALs, maxLogs=10 2024-11-19T12:45:02,976 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C40709%2C1732020302510.1732020302975 2024-11-19T12:45:02,994 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/WALs/aba5a916dfea,40709,1732020302510/aba5a916dfea%2C40709%2C1732020302510.1732020302975 2024-11-19T12:45:03,001 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45517:45517),(127.0.0.1/127.0.0.1:33137:33137),(127.0.0.1/127.0.0.1:39607:39607)] 2024-11-19T12:45:03,011 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:45:03,011 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:45:03,011 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:03,012 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:03,015 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:03,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T12:45:03,017 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:03,018 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:03,019 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:03,021 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T12:45:03,021 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:03,022 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:45:03,022 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:03,025 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T12:45:03,025 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:03,026 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:45:03,026 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:03,029 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T12:45:03,029 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:03,030 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:45:03,031 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:03,032 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:03,033 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:03,035 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:03,035 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:03,035 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T12:45:03,037 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:03,045 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:45:03,046 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70575876, jitterRate=0.05166250467300415}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:45:03,047 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732020303012Initializing all the Stores at 1732020303014 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020303014Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020303014Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020303014Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020303014Cleaning up temporary data from old regions at 1732020303035 (+21 ms)Region opened successfully at 1732020303047 (+12 ms) 2024-11-19T12:45:03,050 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T12:45:03,057 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@95063fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:45:03,058 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T12:45:03,058 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T12:45:03,058 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T12:45:03,058 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T12:45:03,059 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T12:45:03,060 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T12:45:03,060 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T12:45:03,062 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-19T12:45:03,064 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T12:45:03,079 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T12:45:03,079 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T12:45:03,080 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T12:45:03,090 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T12:45:03,090 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T12:45:03,091 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T12:45:03,098 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T12:45:03,099 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T12:45:03,106 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T12:45:03,108 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T12:45:03,114 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T12:45:03,115 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T12:45:03,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:45:03,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:45:03,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:45:03,127 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:45:03,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,127 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:45:03,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,128 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=aba5a916dfea,40709,1732020302510, sessionid=0x101546a05550000, setting cluster-up flag (Was=false) 2024-11-19T12:45:03,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,149 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,173 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, 
/hbase/flush-table-proc/abort 2024-11-19T12:45:03,174 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,40709,1732020302510 2024-11-19T12:45:03,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:45:03,186 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:45:03,190 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,215 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T12:45:03,216 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,40709,1732020302510 2024-11-19T12:45:03,218 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T12:45:03,221 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T12:45:03,221 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T12:45:03,221 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-19T12:45:03,221 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: aba5a916dfea,40709,1732020302510 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T12:45:03,231 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:45:03,231 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:45:03,231 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:45:03,231 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:45:03,231 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/aba5a916dfea:0, corePoolSize=10, maxPoolSize=10 2024-11-19T12:45:03,231 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,232 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:45:03,232 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,236 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732020333236 2024-11-19T12:45:03,236 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T12:45:03,236 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T12:45:03,236 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T12:45:03,236 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T12:45:03,236 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T12:45:03,236 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T12:45:03,237 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(746): ClusterId : 
7b975329-423d-4ab7-b8ca-2638e88e2a7d 2024-11-19T12:45:03,237 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:45:03,237 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(746): ClusterId : 7b975329-423d-4ab7-b8ca-2638e88e2a7d 2024-11-19T12:45:03,237 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:45:03,238 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:45:03,238 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T12:45:03,238 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,238 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.HRegionServer(746): ClusterId : 7b975329-423d-4ab7-b8ca-2638e88e2a7d 2024-11-19T12:45:03,238 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:45:03,238 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T12:45:03,238 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T12:45:03,239 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T12:45:03,239 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T12:45:03,239 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T12:45:03,239 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020303239,5,FailOnTimeoutGroup] 2024-11-19T12:45:03,239 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020303239,5,FailOnTimeoutGroup] 2024-11-19T12:45:03,240 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:03,240 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,240 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T12:45:03,240 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-11-19T12:45:03,240 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,240 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T12:45:03,257 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:45:03,257 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:45:03,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:45:03,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:45:03,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:45:03,262 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T12:45:03,262 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0 2024-11-19T12:45:03,265 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:45:03,265 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:45:03,265 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:45:03,265 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:45:03,274 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:45:03,274 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:45:03,274 DEBUG [RS:1;aba5a916dfea:41695 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e03e342, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:45:03,274 DEBUG [RS:0;aba5a916dfea:38855 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50053b6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:45:03,275 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:45:03,275 DEBUG [RS:2;aba5a916dfea:44751 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a5d0187, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:45:03,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:45:03,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741832_1008 (size=32) 
2024-11-19T12:45:03,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:45:03,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:45:03,292 DEBUG [RS:0;aba5a916dfea:38855 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;aba5a916dfea:38855 2024-11-19T12:45:03,292 DEBUG [RS:2;aba5a916dfea:44751 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;aba5a916dfea:44751 2024-11-19T12:45:03,293 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:45:03,293 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:45:03,293 DEBUG [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-19T12:45:03,293 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:45:03,293 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:45:03,293 DEBUG [RS:2;aba5a916dfea:44751 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-19T12:45:03,294 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,40709,1732020302510 with port=44751, startcode=1732020302791 2024-11-19T12:45:03,294 DEBUG [RS:2;aba5a916dfea:44751 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:45:03,294 DEBUG [RS:1;aba5a916dfea:41695 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;aba5a916dfea:41695 2024-11-19T12:45:03,294 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:45:03,294 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:45:03,294 DEBUG [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-19T12:45:03,294 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,40709,1732020302510 with port=38855, startcode=1732020302685 2024-11-19T12:45:03,294 DEBUG [RS:0;aba5a916dfea:38855 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:45:03,295 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:45:03,295 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,40709,1732020302510 with port=41695, startcode=1732020302737 2024-11-19T12:45:03,295 DEBUG [RS:1;aba5a916dfea:41695 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:45:03,297 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:45:03,297 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54237, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:45:03,297 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:03,298 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40709 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,41695,1732020302737 2024-11-19T12:45:03,298 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40709 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,41695,1732020302737 2024-11-19T12:45:03,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:03,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:45:03,299 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37977, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:45:03,299 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:51495, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:45:03,301 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40709 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,38855,1732020302685 2024-11-19T12:45:03,301 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40709 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,38855,1732020302685 2024-11-19T12:45:03,301 DEBUG [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0 2024-11-19T12:45:03,301 DEBUG [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38985 2024-11-19T12:45:03,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:45:03,301 DEBUG [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:45:03,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:03,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:03,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:45:03,303 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40709 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,44751,1732020302791 2024-11-19T12:45:03,303 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40709 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,44751,1732020302791 2024-11-19T12:45:03,303 DEBUG [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0 2024-11-19T12:45:03,304 DEBUG [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38985 2024-11-19T12:45:03,304 DEBUG [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:45:03,306 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:45:03,306 DEBUG [RS:2;aba5a916dfea:44751 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0 2024-11-19T12:45:03,306 DEBUG [RS:2;aba5a916dfea:44751 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38985 2024-11-19T12:45:03,306 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:03,306 DEBUG [RS:2;aba5a916dfea:44751 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:45:03,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:03,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:45:03,309 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:45:03,309 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:03,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:03,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:45:03,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740 2024-11-19T12:45:03,311 DEBUG 
[PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740 2024-11-19T12:45:03,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:45:03,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:45:03,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:45:03,315 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T12:45:03,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:45:03,330 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:45:03,331 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73224527, jitterRate=0.09113048017024994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:45:03,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732020303292Initializing all the Stores at 1732020303293 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020303293Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020303294 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020303294Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020303294Cleaning up temporary data from old regions at 1732020303315 (+21 ms)Region opened successfully at 1732020303332 (+17 ms) 2024-11-19T12:45:03,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 
1588230740, disabling compactions & flushes 2024-11-19T12:45:03,332 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:45:03,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:45:03,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:45:03,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:45:03,333 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:45:03,333 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020303332Disabling compacts and flushes for region at 1732020303332Disabling writes for close at 1732020303332Writing region close event to WAL at 1732020303333 (+1 ms)Closed at 1732020303333 2024-11-19T12:45:03,335 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:45:03,335 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T12:45:03,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T12:45:03,338 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:45:03,339 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T12:45:03,348 DEBUG [RS:1;aba5a916dfea:41695 {}] zookeeper.ZKUtil(111): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,41695,1732020302737 2024-11-19T12:45:03,348 WARN [RS:1;aba5a916dfea:41695 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T12:45:03,348 INFO [RS:1;aba5a916dfea:41695 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:45:03,348 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,44751,1732020302791] 2024-11-19T12:45:03,348 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,38855,1732020302685] 2024-11-19T12:45:03,348 DEBUG [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/WALs/aba5a916dfea,41695,1732020302737 2024-11-19T12:45:03,348 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,41695,1732020302737] 2024-11-19T12:45:03,348 DEBUG [RS:0;aba5a916dfea:38855 {}] zookeeper.ZKUtil(111): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,38855,1732020302685 2024-11-19T12:45:03,349 WARN [RS:0;aba5a916dfea:38855 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:45:03,349 INFO [RS:0;aba5a916dfea:38855 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:45:03,349 DEBUG [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/WALs/aba5a916dfea,38855,1732020302685 2024-11-19T12:45:03,349 DEBUG [RS:2;aba5a916dfea:44751 {}] zookeeper.ZKUtil(111): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,44751,1732020302791 2024-11-19T12:45:03,349 WARN [RS:2;aba5a916dfea:44751 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
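Editor's note: each of the three region servers above instantiates an FSHLogProvider for its write-ahead log. Which provider is instantiated is configuration-driven; the following is only a rough sketch (the class name and chosen values are illustrative, and defaults vary by release, so verify against the version in use):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "filesystem" selects the FSHLog-based provider seen in this log;
        // "asyncfs" (AsyncFSWAL) is the usual default in recent releases.
        conf.set("hbase.wal.provider", "filesystem");
        // Cap on the number of WAL files before flushes are forced; matches "maxLogs=32" later in this log.
        conf.setInt("hbase.regionserver.maxlogs", 32);
      }
    }
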
2024-11-19T12:45:03,350 INFO [RS:2;aba5a916dfea:44751 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:45:03,350 DEBUG [RS:2;aba5a916dfea:44751 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/WALs/aba5a916dfea,44751,1732020302791 2024-11-19T12:45:03,354 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:45:03,356 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:45:03,358 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:45:03,358 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:45:03,363 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:45:03,363 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:45:03,363 INFO [RS:0;aba5a916dfea:38855 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:45:03,363 INFO [RS:0;aba5a916dfea:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,363 INFO [RS:2;aba5a916dfea:44751 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:45:03,364 INFO [RS:2;aba5a916dfea:44751 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,364 INFO [RS:1;aba5a916dfea:41695 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:45:03,364 INFO [RS:1;aba5a916dfea:41695 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,366 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:45:03,367 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:45:03,368 INFO [RS:0;aba5a916dfea:38855 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:45:03,368 INFO [RS:2;aba5a916dfea:44751 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:45:03,368 INFO [RS:2;aba5a916dfea:44751 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
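Editor's note: the MemStoreFlusher figures above are derived values rather than literal settings. 880 M is the global memstore limit (a fraction of the heap, 0.4 by default) and 836 M is the low-water mark at the default 0.95 of that limit (0.95 x 880 M ~ 836 M); the 100 MB/s and 50 MB/s bounds belong to the PressureAwareCompactionThroughputController. A hedged sketch of the corresponding settings (class name illustrative; key names should be checked against the HBase version in use):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndCompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the heap usable by all memstores (yields the "880 M" figure above).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of that limit (yields the "836 M" figure above).
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Compaction throughput bounds in bytes/second (100 MB/s and 50 MB/s above).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
      }
    }
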
2024-11-19T12:45:03,368 INFO [RS:0;aba5a916dfea:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,368 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,368 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,368 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,368 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,368 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,368 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,368 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,368 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,368 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,368 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,369 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:45:03,369 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:45:03,369 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,369 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,369 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,369 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,369 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 
2024-11-19T12:45:03,369 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,369 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,369 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,369 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,369 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,369 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,369 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,369 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:45:03,369 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:45:03,369 DEBUG [RS:2;aba5a916dfea:44751 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:45:03,369 DEBUG [RS:0;aba5a916dfea:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:45:03,370 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:45:03,372 INFO [RS:1;aba5a916dfea:41695 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:45:03,372 INFO [RS:1;aba5a916dfea:41695 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-19T12:45:03,372 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,372 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,372 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,372 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,373 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,373 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:45:03,373 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,373 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,373 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,373 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,373 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,373 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:03,373 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:45:03,373 DEBUG [RS:1;aba5a916dfea:41695 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:45:03,374 INFO [RS:2;aba5a916dfea:44751 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,374 INFO [RS:2;aba5a916dfea:44751 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,375 INFO [RS:0;aba5a916dfea:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,375 INFO [RS:2;aba5a916dfea:44751 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-19T12:45:03,375 INFO [RS:0;aba5a916dfea:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,375 INFO [RS:2;aba5a916dfea:44751 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,375 INFO [RS:0;aba5a916dfea:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,375 INFO [RS:2;aba5a916dfea:44751 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,375 INFO [RS:0;aba5a916dfea:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,375 INFO [RS:2;aba5a916dfea:44751 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,44751,1732020302791-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:45:03,375 INFO [RS:0;aba5a916dfea:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,375 INFO [RS:0;aba5a916dfea:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,38855,1732020302685-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:45:03,375 INFO [RS:1;aba5a916dfea:41695 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,375 INFO [RS:1;aba5a916dfea:41695 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,375 INFO [RS:1;aba5a916dfea:41695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,376 INFO [RS:1;aba5a916dfea:41695 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,376 INFO [RS:1;aba5a916dfea:41695 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,376 INFO [RS:1;aba5a916dfea:41695 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,41695,1732020302737-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:45:03,391 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:45:03,391 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:45:03,391 INFO [RS:2;aba5a916dfea:44751 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,44751,1732020302791-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,391 INFO [RS:0;aba5a916dfea:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,38855,1732020302685-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,391 INFO [RS:0;aba5a916dfea:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,391 INFO [RS:2;aba5a916dfea:44751 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T12:45:03,391 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.Replication(171): aba5a916dfea,38855,1732020302685 started 2024-11-19T12:45:03,391 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.Replication(171): aba5a916dfea,44751,1732020302791 started 2024-11-19T12:45:03,392 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:45:03,392 INFO [RS:1;aba5a916dfea:41695 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,41695,1732020302737-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,392 INFO [RS:1;aba5a916dfea:41695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,392 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.Replication(171): aba5a916dfea,41695,1732020302737 started 2024-11-19T12:45:03,407 INFO [RS:0;aba5a916dfea:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,407 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,38855,1732020302685, RpcServer on aba5a916dfea/172.17.0.2:38855, sessionid=0x101546a05550001 2024-11-19T12:45:03,407 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:45:03,407 DEBUG [RS:0;aba5a916dfea:38855 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,38855,1732020302685 2024-11-19T12:45:03,407 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,38855,1732020302685' 2024-11-19T12:45:03,408 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:45:03,408 INFO [RS:2;aba5a916dfea:44751 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T12:45:03,408 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,44751,1732020302791, RpcServer on aba5a916dfea/172.17.0.2:44751, sessionid=0x101546a05550003 2024-11-19T12:45:03,408 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:45:03,408 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:45:03,408 DEBUG [RS:2;aba5a916dfea:44751 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,44751,1732020302791 2024-11-19T12:45:03,409 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:45:03,409 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:45:03,409 DEBUG [RS:0;aba5a916dfea:38855 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager aba5a916dfea,38855,1732020302685 2024-11-19T12:45:03,409 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,38855,1732020302685' 2024-11-19T12:45:03,409 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:45:03,410 INFO [RS:1;aba5a916dfea:41695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:03,410 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,41695,1732020302737, RpcServer on aba5a916dfea/172.17.0.2:41695, sessionid=0x101546a05550002 2024-11-19T12:45:03,410 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:45:03,410 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:45:03,410 DEBUG [RS:1;aba5a916dfea:41695 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,41695,1732020302737 2024-11-19T12:45:03,410 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,41695,1732020302737' 2024-11-19T12:45:03,410 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,44751,1732020302791' 2024-11-19T12:45:03,410 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:45:03,410 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:45:03,411 DEBUG [RS:0;aba5a916dfea:38855 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:45:03,411 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:45:03,411 INFO [RS:0;aba5a916dfea:38855 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:45:03,411 INFO [RS:0;aba5a916dfea:38855 {}] 
quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T12:45:03,411 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:45:03,411 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:45:03,411 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:45:03,411 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:45:03,411 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:45:03,411 DEBUG [RS:1;aba5a916dfea:41695 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager aba5a916dfea,41695,1732020302737 2024-11-19T12:45:03,411 DEBUG [RS:2;aba5a916dfea:44751 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager aba5a916dfea,44751,1732020302791 2024-11-19T12:45:03,411 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,44751,1732020302791' 2024-11-19T12:45:03,411 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,41695,1732020302737' 2024-11-19T12:45:03,411 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:45:03,411 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:45:03,412 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:45:03,412 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:45:03,412 DEBUG [RS:2;aba5a916dfea:44751 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:45:03,412 INFO [RS:2;aba5a916dfea:44751 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:45:03,412 DEBUG [RS:1;aba5a916dfea:41695 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:45:03,412 INFO [RS:2;aba5a916dfea:44751 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T12:45:03,412 INFO [RS:1;aba5a916dfea:41695 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:45:03,412 INFO [RS:1;aba5a916dfea:41695 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T12:45:03,489 WARN [aba5a916dfea:40709 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
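Editor's note: two things are worth flagging at this point. First, the WARN from AssignmentManager ("No servers available; cannot place 1 unassigned regions") is transient: the meta ASSIGN procedure was queued before the three region servers finished reporting in, and the assignment is retried successfully a few hundred milliseconds later once systemServersCount=3 (see the "Processing assignQueue" line that follows). Second, the topology here (one master, three region servers) is a test minicluster. A minimal sketch of how such a cluster is typically brought up, assuming the HBaseTestingUtil class referenced later in this log ("Minicluster is up") mirrors the older HBaseTestingUtility API:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster(3);          // one master plus three region servers, as in this log
        try {
          // ... run test logic against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
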
2024-11-19T12:45:03,514 INFO [RS:0;aba5a916dfea:38855 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C38855%2C1732020302685, suffix=, logDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/WALs/aba5a916dfea,38855,1732020302685, archiveDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/oldWALs, maxLogs=32 2024-11-19T12:45:03,515 INFO [RS:2;aba5a916dfea:44751 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C44751%2C1732020302791, suffix=, logDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/WALs/aba5a916dfea,44751,1732020302791, archiveDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/oldWALs, maxLogs=32 2024-11-19T12:45:03,515 INFO [RS:1;aba5a916dfea:41695 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C41695%2C1732020302737, suffix=, logDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/WALs/aba5a916dfea,41695,1732020302737, archiveDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/oldWALs, maxLogs=32 2024-11-19T12:45:03,517 INFO [RS:0;aba5a916dfea:38855 {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C38855%2C1732020302685.1732020303516 2024-11-19T12:45:03,518 INFO [RS:1;aba5a916dfea:41695 {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C41695%2C1732020302737.1732020303517 2024-11-19T12:45:03,518 INFO [RS:2;aba5a916dfea:44751 {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C44751%2C1732020302791.1732020303517 2024-11-19T12:45:03,542 INFO [RS:0;aba5a916dfea:38855 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/WALs/aba5a916dfea,38855,1732020302685/aba5a916dfea%2C38855%2C1732020302685.1732020303516 2024-11-19T12:45:03,543 INFO [RS:1;aba5a916dfea:41695 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/WALs/aba5a916dfea,41695,1732020302737/aba5a916dfea%2C41695%2C1732020302737.1732020303517 2024-11-19T12:45:03,549 INFO [RS:2;aba5a916dfea:44751 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/WALs/aba5a916dfea,44751,1732020302791/aba5a916dfea%2C44751%2C1732020302791.1732020303517 2024-11-19T12:45:03,563 DEBUG [RS:0;aba5a916dfea:38855 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45517:45517),(127.0.0.1/127.0.0.1:33137:33137),(127.0.0.1/127.0.0.1:39607:39607)] 2024-11-19T12:45:03,563 DEBUG [RS:1;aba5a916dfea:41695 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45517:45517),(127.0.0.1/127.0.0.1:39607:39607),(127.0.0.1/127.0.0.1:33137:33137)] 2024-11-19T12:45:03,563 DEBUG [RS:2;aba5a916dfea:44751 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33137:33137),(127.0.0.1/127.0.0.1:45517:45517),(127.0.0.1/127.0.0.1:39607:39607)] 2024-11-19T12:45:03,740 DEBUG [aba5a916dfea:40709 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-19T12:45:03,740 DEBUG [aba5a916dfea:40709 {}] balancer.BalancerClusterState(204): Hosts are {aba5a916dfea=0} racks are {/default-rack=0} 2024-11-19T12:45:03,743 DEBUG [aba5a916dfea:40709 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-19T12:45:03,743 DEBUG [aba5a916dfea:40709 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-19T12:45:03,743 DEBUG [aba5a916dfea:40709 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-19T12:45:03,743 DEBUG [aba5a916dfea:40709 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-19T12:45:03,743 DEBUG [aba5a916dfea:40709 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-19T12:45:03,743 DEBUG [aba5a916dfea:40709 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-19T12:45:03,743 INFO [aba5a916dfea:40709 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-19T12:45:03,743 INFO [aba5a916dfea:40709 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-19T12:45:03,743 INFO [aba5a916dfea:40709 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-19T12:45:03,743 DEBUG [aba5a916dfea:40709 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-19T12:45:03,744 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=aba5a916dfea,38855,1732020302685 2024-11-19T12:45:03,746 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,38855,1732020302685, state=OPENING 2024-11-19T12:45:03,773 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T12:45:03,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,831 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:03,832 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:45:03,832 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:45:03,832 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:45:03,832 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,38855,1732020302685}] 2024-11-19T12:45:03,833 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:45:03,833 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:45:03,987 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T12:45:03,988 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54111, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T12:45:03,999 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T12:45:03,999 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:45:04,002 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C38855%2C1732020302685.meta, suffix=.meta, logDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/WALs/aba5a916dfea,38855,1732020302685, archiveDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/oldWALs, maxLogs=32 2024-11-19T12:45:04,004 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C38855%2C1732020302685.meta.1732020304004.meta 2024-11-19T12:45:04,017 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/WALs/aba5a916dfea,38855,1732020302685/aba5a916dfea%2C38855%2C1732020302685.meta.1732020304004.meta 2024-11-19T12:45:04,026 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45517:45517),(127.0.0.1/127.0.0.1:39607:39607),(127.0.0.1/127.0.0.1:33137:33137)] 2024-11-19T12:45:04,031 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:45:04,031 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T12:45:04,031 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T12:45:04,032 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-19T12:45:04,032 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T12:45:04,032 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:45:04,032 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T12:45:04,032 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T12:45:04,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:45:04,036 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:45:04,036 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:04,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:04,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:45:04,038 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:45:04,039 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:04,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:04,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:45:04,042 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:45:04,042 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:04,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:04,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:45:04,045 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:45:04,045 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:04,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
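Editor's note: at this point the meta region's four stores (info, ns, rep_barrier, table) are open on the hosting region server. Once assignment completes, hbase:meta can be inspected from any client; a minimal sketch, assuming client access to the cluster above (class name illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaScanSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(new Scan().addFamily(Bytes.toBytes("info")))) {
          for (Result r : scanner) {
            System.out.println(Bytes.toStringBinary(r.getRow())); // one row per user region
          }
        }
      }
    }
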
2024-11-19T12:45:04,046 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:45:04,046 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740 2024-11-19T12:45:04,048 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740 2024-11-19T12:45:04,049 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:45:04,049 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:45:04,050 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T12:45:04,052 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:45:04,053 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65479145, jitterRate=-0.02428470551967621}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:45:04,053 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T12:45:04,055 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732020304032Writing region info on filesystem at 1732020304032Initializing all the Stores at 1732020304034 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020304034Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020304034Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020304034Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020304034Cleaning up temporary data from old regions at 1732020304049 (+15 ms)Running coprocessor post-open hooks at 1732020304053 (+4 ms)Region opened successfully at 1732020304054 (+1 ms) 2024-11-19T12:45:04,057 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732020303986 2024-11-19T12:45:04,060 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T12:45:04,060 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T12:45:04,062 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,38855,1732020302685 2024-11-19T12:45:04,064 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,38855,1732020302685, state=OPEN 2024-11-19T12:45:04,082 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:45:04,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:45:04,082 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:45:04,082 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=aba5a916dfea,38855,1732020302685 2024-11-19T12:45:04,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:45:04,082 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:45:04,082 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:45:04,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:45:04,083 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:45:04,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T12:45:04,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,38855,1732020302685 in 250 msec 2024-11-19T12:45:04,096 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T12:45:04,096 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 755 msec 2024-11-19T12:45:04,097 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:45:04,097 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T12:45:04,099 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:45:04,099 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,38855,1732020302685, seqNum=-1] 2024-11-19T12:45:04,100 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:45:04,102 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43757, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:45:04,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 894 msec 2024-11-19T12:45:04,117 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732020304117, completionTime=-1 2024-11-19T12:45:04,117 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-19T12:45:04,117 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
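Editor's note: InitMetaProcedure finishes by creating the 'default' and 'hbase' namespaces. A hedged client-side check that they exist (class name illustrative; assumes a reachable cluster):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceCheckSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName()); // expect "default" and "hbase"
          }
        }
      }
    }
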
2024-11-19T12:45:04,120 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-19T12:45:04,120 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732020364120 2024-11-19T12:45:04,120 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732020424120 2024-11-19T12:45:04,120 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-11-19T12:45:04,121 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-19T12:45:04,121 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40709,1732020302510-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:04,121 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40709,1732020302510-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:04,122 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40709,1732020302510-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:04,122 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-aba5a916dfea:40709, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:04,122 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:04,122 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:04,125 DEBUG [master/aba5a916dfea:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T12:45:04,129 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.256sec 2024-11-19T12:45:04,130 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T12:45:04,131 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T12:45:04,131 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T12:45:04,131 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
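Editor's note: quota support, slow/large request logging, and the WAL event tracker are reported as disabled above because their feature flags default to off. If quotas were wanted in a run like this, the switch is a single boolean, shown as a sketch (class name illustrative; the other features have analogous flags that should be looked up for the exact version):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaEnableSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true); // enables the RPC/space quota managers at startup
      }
    }
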
2024-11-19T12:45:04,131 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T12:45:04,131 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40709,1732020302510-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:45:04,131 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40709,1732020302510-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T12:45:04,135 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T12:45:04,135 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T12:45:04,135 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40709,1732020302510-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:04,139 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c14b991, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:45:04,139 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request aba5a916dfea,40709,-1 for getting cluster id 2024-11-19T12:45:04,139 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T12:45:04,144 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7b975329-423d-4ab7-b8ca-2638e88e2a7d' 2024-11-19T12:45:04,144 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T12:45:04,144 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7b975329-423d-4ab7-b8ca-2638e88e2a7d" 2024-11-19T12:45:04,145 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@245b41da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:45:04,145 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [aba5a916dfea,40709,-1] 2024-11-19T12:45:04,145 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T12:45:04,145 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:45:04,147 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33590, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T12:45:04,149 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51a2c16c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:45:04,149 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:45:04,152 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,38855,1732020302685, seqNum=-1] 2024-11-19T12:45:04,152 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:45:04,156 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50580, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:45:04,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=aba5a916dfea,40709,1732020302510 2024-11-19T12:45:04,160 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T12:45:04,161 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is aba5a916dfea,40709,1732020302510 2024-11-19T12:45:04,161 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2f5891f6 2024-11-19T12:45:04,162 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:45:04,164 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33598, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:45:04,165 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40709 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:45:04,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40709 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-19T12:45:04,173 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:45:04,174 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:04,174 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40709 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-19T12:45:04,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40709 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:45:04,176 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:45:04,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741837_1013 (size=392) 2024-11-19T12:45:04,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741837_1013 (size=392) 2024-11-19T12:45:04,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741837_1013 (size=392) 2024-11-19T12:45:04,193 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2e07664289617f26c2020d61a6828327, NAME => 'TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0 2024-11-19T12:45:04,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741838_1014 (size=51) 2024-11-19T12:45:04,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741838_1014 (size=51) 2024-11-19T12:45:04,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741838_1014 (size=51) 2024-11-19T12:45:04,214 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:45:04,214 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 2e07664289617f26c2020d61a6828327, disabling compactions & flushes 2024-11-19T12:45:04,214 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 2024-11-19T12:45:04,214 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 2024-11-19T12:45:04,214 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. after waiting 0 ms 2024-11-19T12:45:04,214 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 2024-11-19T12:45:04,214 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 
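The create request a few entries above carries the complete table and column-family descriptor (REGION_REPLICATION => '1'; family 'cf' with VERSIONS => '1' and no compression or encoding), which the CreateTableProcedure then materialises on the filesystem. A hedged sketch of issuing the same create through the Java Admin API, with only the table and family names taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)                   // REGION_REPLICATION => '1'
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                     // VERSIONS => '1'
                  .build())
              .build();
          admin.createTable(desc);                       // drives the CreateTableProcedure (pid=4 above)
        }
      }
    }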
2024-11-19T12:45:04,214 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2e07664289617f26c2020d61a6828327: Waiting for close lock at 1732020304214Disabling compacts and flushes for region at 1732020304214Disabling writes for close at 1732020304214Writing region close event to WAL at 1732020304214Closed at 1732020304214 2024-11-19T12:45:04,217 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:45:04,217 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1732020304217"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732020304217"}]},"ts":"1732020304217"} 2024-11-19T12:45:04,221 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-19T12:45:04,224 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:45:04,224 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020304224"}]},"ts":"1732020304224"} 2024-11-19T12:45:04,228 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-19T12:45:04,229 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {aba5a916dfea=0} racks are {/default-rack=0} 2024-11-19T12:45:04,230 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-19T12:45:04,230 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-19T12:45:04,230 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-19T12:45:04,230 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-19T12:45:04,230 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-19T12:45:04,230 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-19T12:45:04,230 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-19T12:45:04,230 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-19T12:45:04,230 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-19T12:45:04,230 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-19T12:45:04,231 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=2e07664289617f26c2020d61a6828327, ASSIGN}] 2024-11-19T12:45:04,233 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=2e07664289617f26c2020d61a6828327, ASSIGN 2024-11-19T12:45:04,235 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=2e07664289617f26c2020d61a6828327, ASSIGN; state=OFFLINE, location=aba5a916dfea,41695,1732020302737; forceNewPlan=false, retain=false 2024-11-19T12:45:04,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40709 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:45:04,387 INFO [aba5a916dfea:40709 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-19T12:45:04,387 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2e07664289617f26c2020d61a6828327, regionState=OPENING, regionLocation=aba5a916dfea,41695,1732020302737 2024-11-19T12:45:04,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=2e07664289617f26c2020d61a6828327, ASSIGN because future has completed 2024-11-19T12:45:04,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2e07664289617f26c2020d61a6828327, server=aba5a916dfea,41695,1732020302737}] 2024-11-19T12:45:04,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40709 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:45:04,547 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T12:45:04,549 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39099, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T12:45:04,553 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 
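The ASSIGN procedure above picks a target server (aba5a916dfea,41695,...) and dispatches an OpenRegionProcedure to it over AdminService. From the client side the resulting placement is visible through a RegionLocator; a minimal sketch, assuming the standard 2.x+ client API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowAssignment {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
          // After ASSIGN completes, each region reports the server the master placed it on.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }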
2024-11-19T12:45:04,553 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 2e07664289617f26c2020d61a6828327, NAME => 'TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:45:04,554 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 2e07664289617f26c2020d61a6828327 2024-11-19T12:45:04,554 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:45:04,554 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 2e07664289617f26c2020d61a6828327 2024-11-19T12:45:04,554 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 2e07664289617f26c2020d61a6828327 2024-11-19T12:45:04,556 INFO [StoreOpener-2e07664289617f26c2020d61a6828327-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2e07664289617f26c2020d61a6828327 2024-11-19T12:45:04,558 INFO [StoreOpener-2e07664289617f26c2020d61a6828327-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2e07664289617f26c2020d61a6828327 columnFamilyName cf 2024-11-19T12:45:04,559 DEBUG [StoreOpener-2e07664289617f26c2020d61a6828327-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:04,559 INFO [StoreOpener-2e07664289617f26c2020d61a6828327-1 {}] regionserver.HStore(327): Store=2e07664289617f26c2020d61a6828327/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:45:04,560 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 2e07664289617f26c2020d61a6828327 2024-11-19T12:45:04,561 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/default/TestHBaseWalOnEC/2e07664289617f26c2020d61a6828327 2024-11-19T12:45:04,561 DEBUG 
[RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/default/TestHBaseWalOnEC/2e07664289617f26c2020d61a6828327 2024-11-19T12:45:04,561 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 2e07664289617f26c2020d61a6828327 2024-11-19T12:45:04,562 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 2e07664289617f26c2020d61a6828327 2024-11-19T12:45:04,564 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 2e07664289617f26c2020d61a6828327 2024-11-19T12:45:04,566 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/default/TestHBaseWalOnEC/2e07664289617f26c2020d61a6828327/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:45:04,568 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 2e07664289617f26c2020d61a6828327; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72051170, jitterRate=0.07364609837532043}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T12:45:04,568 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2e07664289617f26c2020d61a6828327 2024-11-19T12:45:04,569 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 2e07664289617f26c2020d61a6828327: Running coprocessor pre-open hook at 1732020304554Writing region info on filesystem at 1732020304554Initializing all the Stores at 1732020304555 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020304556 (+1 ms)Cleaning up temporary data from old regions at 1732020304562 (+6 ms)Running coprocessor post-open hooks at 1732020304568 (+6 ms)Region opened successfully at 1732020304569 (+1 ms) 2024-11-19T12:45:04,570 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327., pid=6, masterSystemTime=1732020304547 2024-11-19T12:45:04,574 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 2024-11-19T12:45:04,574 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 
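Once the region reports open, the test utility later waits up to 60000 ms for every region of TestHBaseWalOnEC to be assigned before writing. A hedged client-side equivalent of that wait, built only on Admin.isTableAvailable (the polling interval and exception are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Poll until the table is enabled and every region is deployed, with a crude
          // 60 s cap mirroring the 60000 ms timeout used by the test utility.
          long deadline = System.currentTimeMillis() + 60_000;
          while (!admin.isTableAvailable(table)) {
            if (System.currentTimeMillis() > deadline) {
              throw new IllegalStateException("Regions of " + table + " not assigned in time");
            }
            Thread.sleep(200);
          }
          System.out.println(table + " is fully available");
        }
      }
    }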
2024-11-19T12:45:04,575 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2e07664289617f26c2020d61a6828327, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,41695,1732020302737 2024-11-19T12:45:04,578 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T12:45:04,578 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T12:45:04,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2e07664289617f26c2020d61a6828327, server=aba5a916dfea,41695,1732020302737 because future has completed 2024-11-19T12:45:04,581 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:45:04,581 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T12:45:04,582 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T12:45:04,582 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T12:45:04,583 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC 2024-11-19T12:45:04,583 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC Metrics about Tables on a single HBase RegionServer 2024-11-19T12:45:04,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T12:45:04,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 2e07664289617f26c2020d61a6828327, server=aba5a916dfea,41695,1732020302737 in 190 msec 2024-11-19T12:45:04,592 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T12:45:04,592 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=2e07664289617f26c2020d61a6828327, ASSIGN in 357 msec 2024-11-19T12:45:04,594 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:45:04,594 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020304594"}]},"ts":"1732020304594"} 2024-11-19T12:45:04,598 INFO [PEWorker-1 {}] 
hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-19T12:45:04,601 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:45:04,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 436 msec 2024-11-19T12:45:04,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40709 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:45:04,805 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-19T12:45:04,805 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-19T12:45:04,805 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T12:45:04,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-19T12:45:04,809 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T12:45:04,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-19T12:45:04,813 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327., hostname=aba5a916dfea,41695,1732020302737, seqNum=2] 2024-11-19T12:45:04,813 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:45:04,819 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41078, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:45:04,823 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40709 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-19T12:45:04,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40709 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-19T12:45:04,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40709 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-19T12:45:04,826 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:45:04,828 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:45:04,828 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, 
ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:45:04,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40709 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-19T12:45:04,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41695 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-19T12:45:04,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 2024-11-19T12:45:04,985 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 2e07664289617f26c2020d61a6828327 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-19T12:45:05,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/default/TestHBaseWalOnEC/2e07664289617f26c2020d61a6828327/.tmp/cf/3ce7f31deb9b4e60940fb59236782865 is 36, key is row/cf:cq/1732020304820/Put/seqid=0 2024-11-19T12:45:05,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741839_1015 (size=4787) 2024-11-19T12:45:05,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741839_1015 (size=4787) 2024-11-19T12:45:05,019 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/default/TestHBaseWalOnEC/2e07664289617f26c2020d61a6828327/.tmp/cf/3ce7f31deb9b4e60940fb59236782865 2024-11-19T12:45:05,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741839_1015 (size=4787) 2024-11-19T12:45:05,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/default/TestHBaseWalOnEC/2e07664289617f26c2020d61a6828327/.tmp/cf/3ce7f31deb9b4e60940fb59236782865 as hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/default/TestHBaseWalOnEC/2e07664289617f26c2020d61a6828327/cf/3ce7f31deb9b4e60940fb59236782865 2024-11-19T12:45:05,041 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/default/TestHBaseWalOnEC/2e07664289617f26c2020d61a6828327/cf/3ce7f31deb9b4e60940fb59236782865, entries=1, sequenceid=5, filesize=4.7 K 2024-11-19T12:45:05,043 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 2e07664289617f26c2020d61a6828327 in 57ms, sequenceid=5, 
compaction requested=false 2024-11-19T12:45:05,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 2e07664289617f26c2020d61a6828327: 2024-11-19T12:45:05,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 2024-11-19T12:45:05,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-19T12:45:05,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40709 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-19T12:45:05,050 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-19T12:45:05,050 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 218 msec 2024-11-19T12:45:05,054 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 228 msec 2024-11-19T12:45:05,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40709 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-19T12:45:05,145 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-19T12:45:05,150 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T12:45:05,150 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
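The flush above wrote a single ~32 B cell whose key is row/cf:cq into an HFile of about 4.7 K, replicated to three datanodes. A hedged sketch of the client side that produces exactly this: one Put followed by an administrative flush (the cell value is a placeholder, since the log only shows the key):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          // Single cell row/cf:cq, matching the key seen in the flushed HFile above.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Triggers FlushTableProcedure on the master, which fans out a
          // FlushRegionProcedure to the region server hosting the region.
          admin.flush(name);
        }
      }
    }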
2024-11-19T12:45:05,151 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:45:05,151 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
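The call stack shows the shutdown running from TestHBaseWalOnEC.tearDown via the test utility's shutdownMiniCluster. A hedged outline of that test lifecycle, written against HBaseTestingUtility as shipped in the released 2.x lines (the log's HBaseTestingUtil is the newer name of the same utility); the erasure-coding setup the test's name implies is not shown:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycleSketch {
      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUp() throws Exception {
        // The real test presumably enables an HDFS erasure-coding policy on the
        // test directory before starting HBase; that step is omitted here.
        UTIL.startMiniCluster(3);   // three region servers, as in the log above
      }

      @AfterClass
      public static void tearDown() throws Exception {
        UTIL.shutdownMiniCluster(); // produces the shutdown sequence that follows
      }
    }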
2024-11-19T12:45:05,151 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:45:05,151 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T12:45:05,152 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T12:45:05,152 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1375900484, stopped=false 2024-11-19T12:45:05,152 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=aba5a916dfea,40709,1732020302510 2024-11-19T12:45:05,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:45:05,164 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:45:05,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:45:05,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:45:05,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:05,165 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:05,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:05,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:05,165 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:45:05,165 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
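Cluster shutdown is broadcast by deleting the /hbase/running znode: every ZKWatcher above receives NodeDeleted and then re-sets a watch on the now-absent node. A bare-ZooKeeper sketch of observing that signal (the quorum address 127.0.0.1:55862 is the one in the log; session timeout and latch handling are illustrative):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningNodeWatcher {
      public static void main(String[] args) throws Exception {
        CountDownLatch deleted = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55862", 30_000, event -> {
          // The master deletes /hbase/running to tell region servers to shut down.
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && "/hbase/running".equals(event.getPath())) {
            deleted.countDown();
          }
        });
        zk.exists("/hbase/running", true); // register the default watcher on the znode
        deleted.await();
        System.out.println("cluster shutdown requested");
        zk.close();
      }
    }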
2024-11-19T12:45:05,165 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:45:05,165 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:45:05,165 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:45:05,165 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:45:05,165 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:45:05,165 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:45:05,166 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,38855,1732020302685' ***** 2024-11-19T12:45:05,166 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:45:05,166 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,41695,1732020302737' ***** 2024-11-19T12:45:05,166 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:45:05,166 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,44751,1732020302791' ***** 2024-11-19T12:45:05,166 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:45:05,166 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:45:05,166 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:45:05,166 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:45:05,166 INFO [RS:0;aba5a916dfea:38855 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:45:05,167 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:45:05,167 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:45:05,167 INFO [RS:1;aba5a916dfea:41695 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:45:05,167 INFO [RS:2;aba5a916dfea:44751 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:45:05,167 INFO [RS:1;aba5a916dfea:41695 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T12:45:05,167 INFO [RS:2;aba5a916dfea:44751 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-19T12:45:05,167 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,44751,1732020302791 2024-11-19T12:45:05,167 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(3091): Received CLOSE for 2e07664289617f26c2020d61a6828327 2024-11-19T12:45:05,167 INFO [RS:2;aba5a916dfea:44751 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:45:05,167 INFO [RS:0;aba5a916dfea:38855 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T12:45:05,167 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,38855,1732020302685 2024-11-19T12:45:05,167 INFO [RS:2;aba5a916dfea:44751 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;aba5a916dfea:44751. 2024-11-19T12:45:05,167 INFO [RS:0;aba5a916dfea:38855 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:45:05,167 INFO [RS:0;aba5a916dfea:38855 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;aba5a916dfea:38855. 2024-11-19T12:45:05,167 DEBUG [RS:2;aba5a916dfea:44751 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:45:05,167 DEBUG [RS:2;aba5a916dfea:44751 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:45:05,167 DEBUG [RS:0;aba5a916dfea:38855 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at 
java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:45:05,167 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,44751,1732020302791; all regions closed. 2024-11-19T12:45:05,167 DEBUG [RS:0;aba5a916dfea:38855 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:45:05,167 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T12:45:05,167 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:45:05,168 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T12:45:05,168 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T12:45:05,168 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T12:45:05,168 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,41695,1732020302737 2024-11-19T12:45:05,169 INFO [RS:1;aba5a916dfea:41695 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:45:05,169 INFO [RS:1;aba5a916dfea:41695 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;aba5a916dfea:41695. 2024-11-19T12:45:05,169 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-19T12:45:05,169 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2e07664289617f26c2020d61a6828327, disabling compactions & flushes 2024-11-19T12:45:05,169 DEBUG [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-19T12:45:05,169 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 
2024-11-19T12:45:05,169 DEBUG [RS:1;aba5a916dfea:41695 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:45:05,169 DEBUG [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T12:45:05,169 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:45:05,169 DEBUG [RS:1;aba5a916dfea:41695 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:45:05,169 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 2024-11-19T12:45:05,169 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:45:05,169 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. after waiting 0 ms 2024-11-19T12:45:05,169 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-19T12:45:05,169 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:45:05,169 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 
2024-11-19T12:45:05,169 DEBUG [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(1325): Online Regions={2e07664289617f26c2020d61a6828327=TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327.} 2024-11-19T12:45:05,169 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:45:05,169 DEBUG [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(1351): Waiting on 2e07664289617f26c2020d61a6828327 2024-11-19T12:45:05,169 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:45:05,169 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,169 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-19T12:45:05,169 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,170 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,170 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,170 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,174 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/default/TestHBaseWalOnEC/2e07664289617f26c2020d61a6828327/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-19T12:45:05,175 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 2024-11-19T12:45:05,175 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2e07664289617f26c2020d61a6828327: Waiting for close lock at 1732020305169Running coprocessor pre-close hooks at 1732020305169Disabling compacts and flushes for region at 1732020305169Disabling writes for close at 1732020305169Writing region close event to WAL at 1732020305170 (+1 ms)Running coprocessor post-close hooks at 1732020305175 (+5 ms)Closed at 1732020305175 2024-11-19T12:45:05,176 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327. 
2024-11-19T12:45:05,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741835_1011 (size=93) 2024-11-19T12:45:05,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741835_1011 (size=93) 2024-11-19T12:45:05,177 INFO [regionserver/aba5a916dfea:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:45:05,177 INFO [regionserver/aba5a916dfea:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:45:05,177 INFO [regionserver/aba5a916dfea:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:45:05,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741835_1011 (size=93) 2024-11-19T12:45:05,180 DEBUG [RS:2;aba5a916dfea:44751 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/oldWALs 2024-11-19T12:45:05,180 INFO [RS:2;aba5a916dfea:44751 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C44751%2C1732020302791:(num 1732020303517) 2024-11-19T12:45:05,180 DEBUG [RS:2;aba5a916dfea:44751 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:45:05,180 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:45:05,181 INFO [RS:2;aba5a916dfea:44751 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:45:05,181 INFO [RS:2;aba5a916dfea:44751 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T12:45:05,181 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T12:45:05,181 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T12:45:05,181 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T12:45:05,181 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T12:45:05,181 INFO [RS:2;aba5a916dfea:44751 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:45:05,181 INFO [RS:2;aba5a916dfea:44751 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44751 2024-11-19T12:45:05,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,44751,1732020302791 2024-11-19T12:45:05,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:45:05,189 INFO [RS:2;aba5a916dfea:44751 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:45:05,191 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/.tmp/info/5f1cde457f8742abbf5c0f347af8cc9f is 153, key is TestHBaseWalOnEC,,1732020304165.2e07664289617f26c2020d61a6828327./info:regioninfo/1732020304575/Put/seqid=0 2024-11-19T12:45:05,198 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,44751,1732020302791] 2024-11-19T12:45:05,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741840_1016 (size=6637) 2024-11-19T12:45:05,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741840_1016 (size=6637) 2024-11-19T12:45:05,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741840_1016 (size=6637) 2024-11-19T12:45:05,201 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/.tmp/info/5f1cde457f8742abbf5c0f347af8cc9f 2024-11-19T12:45:05,206 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,44751,1732020302791 already deleted, retry=false 2024-11-19T12:45:05,206 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,44751,1732020302791 expired; onlineServers=2 2024-11-19T12:45:05,228 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/.tmp/ns/8f0732727bfd46038a82fd6c304d3db6 is 43, key is default/ns:d/1732020304103/Put/seqid=0 2024-11-19T12:45:05,229 WARN [IPC Server handler 3 on default port 38985 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T12:45:05,229 WARN [IPC Server handler 3 on default port 38985 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T12:45:05,229 WARN [IPC Server handler 3 on default port 38985 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T12:45:05,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741841_1017 (size=5153) 2024-11-19T12:45:05,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741841_1017 (size=5153) 2024-11-19T12:45:05,235 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/.tmp/ns/8f0732727bfd46038a82fd6c304d3db6 2024-11-19T12:45:05,263 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/.tmp/table/47a189ca9d9e408ba19f05c3d56a1120 is 52, key is TestHBaseWalOnEC/table:state/1732020304594/Put/seqid=0 2024-11-19T12:45:05,265 WARN [IPC Server handler 4 on default port 38985 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T12:45:05,265 WARN [IPC Server handler 4 on default port 38985 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T12:45:05,265 WARN [IPC Server handler 4 on default port 38985 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 
2024-11-19T12:45:05,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741842_1018 (size=5249) 2024-11-19T12:45:05,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741842_1018 (size=5249) 2024-11-19T12:45:05,270 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/.tmp/table/47a189ca9d9e408ba19f05c3d56a1120 2024-11-19T12:45:05,279 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/.tmp/info/5f1cde457f8742abbf5c0f347af8cc9f as hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/info/5f1cde457f8742abbf5c0f347af8cc9f 2024-11-19T12:45:05,295 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/info/5f1cde457f8742abbf5c0f347af8cc9f, entries=10, sequenceid=11, filesize=6.5 K 2024-11-19T12:45:05,296 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/.tmp/ns/8f0732727bfd46038a82fd6c304d3db6 as hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/ns/8f0732727bfd46038a82fd6c304d3db6 2024-11-19T12:45:05,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:45:05,298 INFO [RS:2;aba5a916dfea:44751 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:45:05,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44751-0x101546a05550003, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:45:05,298 INFO [RS:2;aba5a916dfea:44751 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,44751,1732020302791; zookeeper connection closed. 
2024-11-19T12:45:05,298 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@79f96aa6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@79f96aa6 2024-11-19T12:45:05,305 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/ns/8f0732727bfd46038a82fd6c304d3db6, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T12:45:05,307 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/.tmp/table/47a189ca9d9e408ba19f05c3d56a1120 as hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/table/47a189ca9d9e408ba19f05c3d56a1120 2024-11-19T12:45:05,314 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/table/47a189ca9d9e408ba19f05c3d56a1120, entries=2, sequenceid=11, filesize=5.1 K 2024-11-19T12:45:05,316 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 147ms, sequenceid=11, compaction requested=false 2024-11-19T12:45:05,322 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T12:45:05,323 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:45:05,323 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:45:05,323 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020305169Running coprocessor pre-close hooks at 1732020305169Disabling compacts and flushes for region at 1732020305169Disabling writes for close at 1732020305169Obtaining lock to block concurrent updates at 1732020305169Preparing flush snapshotting stores in 1588230740 at 1732020305169Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1732020305170 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732020305171 (+1 ms)Flushing 1588230740/info: creating writer at 1732020305171Flushing 1588230740/info: appending metadata at 1732020305191 (+20 ms)Flushing 1588230740/info: closing flushed file at 1732020305191Flushing 1588230740/ns: creating writer at 1732020305211 (+20 ms)Flushing 1588230740/ns: appending metadata at 1732020305227 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732020305227Flushing 1588230740/table: creating writer at 1732020305243 (+16 ms)Flushing 
1588230740/table: appending metadata at 1732020305262 (+19 ms)Flushing 1588230740/table: closing flushed file at 1732020305263 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4543786c: reopening flushed file at 1732020305277 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e7c5a1f: reopening flushed file at 1732020305295 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a04b8f4: reopening flushed file at 1732020305306 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 147ms, sequenceid=11, compaction requested=false at 1732020305316 (+10 ms)Writing region close event to WAL at 1732020305317 (+1 ms)Running coprocessor post-close hooks at 1732020305322 (+5 ms)Closed at 1732020305323 (+1 ms) 2024-11-19T12:45:05,323 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T12:45:05,369 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,38855,1732020302685; all regions closed. 2024-11-19T12:45:05,369 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,41695,1732020302737; all regions closed. 2024-11-19T12:45:05,370 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,370 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,370 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,370 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,370 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,370 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,370 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,370 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,370 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,370 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741833_1009 (size=1298) 2024-11-19T12:45:05,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741836_1012 (size=2751) 2024-11-19T12:45:05,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741836_1012 (size=2751) 2024-11-19T12:45:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741836_1012 (size=2751) 2024-11-19T12:45:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741833_1009 (size=1298) 2024-11-19T12:45:05,375 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T12:45:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741833_1009 (size=1298) 2024-11-19T12:45:05,375 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T12:45:05,378 DEBUG [RS:1;aba5a916dfea:41695 {}] 
wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/oldWALs 2024-11-19T12:45:05,378 DEBUG [RS:0;aba5a916dfea:38855 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/oldWALs 2024-11-19T12:45:05,378 INFO [RS:1;aba5a916dfea:41695 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C41695%2C1732020302737:(num 1732020303517) 2024-11-19T12:45:05,378 INFO [RS:0;aba5a916dfea:38855 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C38855%2C1732020302685.meta:.meta(num 1732020304004) 2024-11-19T12:45:05,378 DEBUG [RS:1;aba5a916dfea:41695 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:45:05,378 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:45:05,378 INFO [RS:1;aba5a916dfea:41695 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:45:05,379 INFO [RS:1;aba5a916dfea:41695 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T12:45:05,379 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T12:45:05,379 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T12:45:05,379 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T12:45:05,379 INFO [RS:1;aba5a916dfea:41695 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:45:05,379 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,379 INFO [RS:1;aba5a916dfea:41695 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41695 2024-11-19T12:45:05,379 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,379 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,380 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T12:45:05,380 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,380 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741834_1010 (size=93) 2024-11-19T12:45:05,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741834_1010 (size=93) 2024-11-19T12:45:05,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741834_1010 (size=93) 2024-11-19T12:45:05,385 DEBUG [RS:0;aba5a916dfea:38855 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/oldWALs 2024-11-19T12:45:05,386 INFO [RS:0;aba5a916dfea:38855 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C38855%2C1732020302685:(num 1732020303516) 2024-11-19T12:45:05,386 DEBUG [RS:0;aba5a916dfea:38855 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:45:05,386 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:45:05,386 INFO [RS:0;aba5a916dfea:38855 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:45:05,386 INFO [RS:0;aba5a916dfea:38855 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T12:45:05,386 INFO [RS:0;aba5a916dfea:38855 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:45:05,386 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T12:45:05,386 INFO [RS:0;aba5a916dfea:38855 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38855 2024-11-19T12:45:05,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:45:05,404 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,41695,1732020302737 2024-11-19T12:45:05,404 INFO [RS:1;aba5a916dfea:41695 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:45:05,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,38855,1732020302685 2024-11-19T12:45:05,414 INFO [RS:0;aba5a916dfea:38855 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:45:05,423 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,38855,1732020302685] 2024-11-19T12:45:05,439 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,38855,1732020302685 already deleted, retry=false 2024-11-19T12:45:05,439 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,38855,1732020302685 expired; onlineServers=1 2024-11-19T12:45:05,439 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,41695,1732020302737] 2024-11-19T12:45:05,447 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,41695,1732020302737 already deleted, retry=false 2024-11-19T12:45:05,448 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,41695,1732020302737 expired; onlineServers=0 2024-11-19T12:45:05,448 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'aba5a916dfea,40709,1732020302510' ***** 2024-11-19T12:45:05,448 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T12:45:05,448 INFO [M:0;aba5a916dfea:40709 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:45:05,448 INFO [M:0;aba5a916dfea:40709 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:45:05,448 DEBUG [M:0;aba5a916dfea:40709 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T12:45:05,448 DEBUG [M:0;aba5a916dfea:40709 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T12:45:05,448 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T12:45:05,448 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020303239 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020303239,5,FailOnTimeoutGroup] 2024-11-19T12:45:05,448 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020303239 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020303239,5,FailOnTimeoutGroup] 2024-11-19T12:45:05,449 INFO [M:0;aba5a916dfea:40709 {}] hbase.ChoreService(370): Chore service for: master/aba5a916dfea:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T12:45:05,449 INFO [M:0;aba5a916dfea:40709 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:45:05,449 DEBUG [M:0;aba5a916dfea:40709 {}] master.HMaster(1795): Stopping service threads 2024-11-19T12:45:05,449 INFO [M:0;aba5a916dfea:40709 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T12:45:05,449 INFO [M:0;aba5a916dfea:40709 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:45:05,449 INFO [M:0;aba5a916dfea:40709 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T12:45:05,450 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T12:45:05,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T12:45:05,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:05,456 DEBUG [M:0;aba5a916dfea:40709 {}] zookeeper.ZKUtil(347): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T12:45:05,456 WARN [M:0;aba5a916dfea:40709 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T12:45:05,457 INFO [M:0;aba5a916dfea:40709 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/.lastflushedseqids 2024-11-19T12:45:05,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741843_1019 (size=127) 2024-11-19T12:45:05,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741843_1019 (size=127) 2024-11-19T12:45:05,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741843_1019 (size=127) 2024-11-19T12:45:05,482 INFO [M:0;aba5a916dfea:40709 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T12:45:05,482 INFO [M:0;aba5a916dfea:40709 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T12:45:05,482 DEBUG 
[M:0;aba5a916dfea:40709 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:45:05,482 INFO [M:0;aba5a916dfea:40709 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:45:05,482 DEBUG [M:0;aba5a916dfea:40709 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:45:05,482 DEBUG [M:0;aba5a916dfea:40709 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:45:05,482 DEBUG [M:0;aba5a916dfea:40709 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:45:05,483 INFO [M:0;aba5a916dfea:40709 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-19T12:45:05,507 DEBUG [M:0;aba5a916dfea:40709 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/eac606602206410eaba49b073fdc4612 is 82, key is hbase:meta,,1/info:regioninfo/1732020304061/Put/seqid=0 2024-11-19T12:45:05,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741844_1020 (size=5672) 2024-11-19T12:45:05,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741844_1020 (size=5672) 2024-11-19T12:45:05,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741844_1020 (size=5672) 2024-11-19T12:45:05,523 INFO [M:0;aba5a916dfea:40709 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/eac606602206410eaba49b073fdc4612 2024-11-19T12:45:05,524 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:45:05,524 INFO [RS:1;aba5a916dfea:41695 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:45:05,524 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41695-0x101546a05550002, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:45:05,524 INFO [RS:1;aba5a916dfea:41695 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,41695,1732020302737; zookeeper connection closed. 
2024-11-19T12:45:05,524 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@b70bb7f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@b70bb7f 2024-11-19T12:45:05,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:45:05,531 INFO [RS:0;aba5a916dfea:38855 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:45:05,532 INFO [RS:0;aba5a916dfea:38855 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,38855,1732020302685; zookeeper connection closed. 2024-11-19T12:45:05,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101546a05550001, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:45:05,535 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7e2ba182 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7e2ba182 2024-11-19T12:45:05,535 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-19T12:45:05,554 DEBUG [M:0;aba5a916dfea:40709 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/df792c457d554bf1b55d70b4081b0ccd is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732020304604/Put/seqid=0 2024-11-19T12:45:05,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741845_1021 (size=6440) 2024-11-19T12:45:05,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741845_1021 (size=6440) 2024-11-19T12:45:05,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741845_1021 (size=6440) 2024-11-19T12:45:05,571 INFO [M:0;aba5a916dfea:40709 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/df792c457d554bf1b55d70b4081b0ccd 2024-11-19T12:45:05,600 DEBUG [M:0;aba5a916dfea:40709 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b55777fb67d34efa8a58521f03978a45 is 69, key is aba5a916dfea,38855,1732020302685/rs:state/1732020303301/Put/seqid=0 2024-11-19T12:45:05,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741846_1022 (size=5294) 2024-11-19T12:45:05,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741846_1022 (size=5294) 2024-11-19T12:45:05,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741846_1022 (size=5294) 2024-11-19T12:45:05,616 INFO 
[M:0;aba5a916dfea:40709 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b55777fb67d34efa8a58521f03978a45 2024-11-19T12:45:05,622 DEBUG [M:0;aba5a916dfea:40709 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/eac606602206410eaba49b073fdc4612 as hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/eac606602206410eaba49b073fdc4612 2024-11-19T12:45:05,630 INFO [M:0;aba5a916dfea:40709 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/eac606602206410eaba49b073fdc4612, entries=8, sequenceid=72, filesize=5.5 K 2024-11-19T12:45:05,632 DEBUG [M:0;aba5a916dfea:40709 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/df792c457d554bf1b55d70b4081b0ccd as hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/df792c457d554bf1b55d70b4081b0ccd 2024-11-19T12:45:05,640 INFO [M:0;aba5a916dfea:40709 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/df792c457d554bf1b55d70b4081b0ccd, entries=8, sequenceid=72, filesize=6.3 K 2024-11-19T12:45:05,641 DEBUG [M:0;aba5a916dfea:40709 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b55777fb67d34efa8a58521f03978a45 as hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b55777fb67d34efa8a58521f03978a45 2024-11-19T12:45:05,649 INFO [M:0;aba5a916dfea:40709 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38985/user/jenkins/test-data/95554ad6-1972-a5a1-46ae-efaa7d31c8a0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b55777fb67d34efa8a58521f03978a45, entries=3, sequenceid=72, filesize=5.2 K 2024-11-19T12:45:05,651 INFO [M:0;aba5a916dfea:40709 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 169ms, sequenceid=72, compaction requested=false 2024-11-19T12:45:05,652 INFO [M:0;aba5a916dfea:40709 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T12:45:05,653 DEBUG [M:0;aba5a916dfea:40709 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020305482Disabling compacts and flushes for region at 1732020305482Disabling writes for close at 1732020305482Obtaining lock to block concurrent updates at 1732020305483 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732020305483Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1732020305483Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732020305484 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732020305484Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732020305506 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732020305506Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732020305530 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732020305554 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732020305554Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732020305579 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732020305599 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732020305599Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@563d5b3b: reopening flushed file at 1732020305621 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d0c2059: reopening flushed file at 1732020305630 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f54a655: reopening flushed file at 1732020305640 (+10 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 169ms, sequenceid=72, compaction requested=false at 1732020305651 (+11 ms)Writing region close event to WAL at 1732020305652 (+1 ms)Closed at 1732020305652 2024-11-19T12:45:05,653 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,653 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,653 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,653 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,653 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:05,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38635 is added to blk_1073741830_1006 (size=32686) 2024-11-19T12:45:05,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37409 is added to blk_1073741830_1006 (size=32686) 2024-11-19T12:45:05,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741830_1006 (size=32686) 2024-11-19T12:45:05,657 INFO [M:0;aba5a916dfea:40709 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T12:45:05,657 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T12:45:05,657 INFO [M:0;aba5a916dfea:40709 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40709 2024-11-19T12:45:05,657 INFO [M:0;aba5a916dfea:40709 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:45:05,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:45:05,781 INFO [M:0;aba5a916dfea:40709 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:45:05,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40709-0x101546a05550000, quorum=127.0.0.1:55862, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:45:05,784 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@aa26abb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:45:05,784 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c97cc8b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:45:05,784 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:45:05,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@736fee86{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:45:05,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49c714b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/hadoop.log.dir/,STOPPED} 2024-11-19T12:45:05,786 WARN [BP-542822039-172.17.0.2-1732020300446 heartbeating to localhost/127.0.0.1:38985 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:45:05,786 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:45:05,786 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:45:05,786 WARN [BP-542822039-172.17.0.2-1732020300446 heartbeating to localhost/127.0.0.1:38985 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-542822039-172.17.0.2-1732020300446 (Datanode Uuid 9c21a45e-ffa4-4275-a1a0-f79176683b36) service to localhost/127.0.0.1:38985 2024-11-19T12:45:05,787 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/data/data5/current/BP-542822039-172.17.0.2-1732020300446 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:45:05,787 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/data/data6/current/BP-542822039-172.17.0.2-1732020300446 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:45:05,787 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:45:05,790 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@230b47a5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:45:05,790 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@72785dee{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:45:05,790 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:45:05,790 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18d7f435{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:45:05,790 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42c6a290{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/hadoop.log.dir/,STOPPED} 2024-11-19T12:45:05,792 WARN [BP-542822039-172.17.0.2-1732020300446 heartbeating to localhost/127.0.0.1:38985 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:45:05,792 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:45:05,792 WARN [BP-542822039-172.17.0.2-1732020300446 heartbeating to localhost/127.0.0.1:38985 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-542822039-172.17.0.2-1732020300446 (Datanode Uuid b4646c5e-69ff-4792-9bdd-6a9a6cea4661) service to localhost/127.0.0.1:38985 2024-11-19T12:45:05,792 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:45:05,792 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/data/data3/current/BP-542822039-172.17.0.2-1732020300446 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:45:05,793 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/data/data4/current/BP-542822039-172.17.0.2-1732020300446 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:45:05,793 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:45:05,795 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@381275e5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:45:05,796 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68e19264{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:45:05,796 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:45:05,796 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@443cc38c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:45:05,796 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47030e3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/hadoop.log.dir/,STOPPED} 2024-11-19T12:45:05,797 WARN [BP-542822039-172.17.0.2-1732020300446 heartbeating to localhost/127.0.0.1:38985 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:45:05,797 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:45:05,797 WARN [BP-542822039-172.17.0.2-1732020300446 heartbeating to localhost/127.0.0.1:38985 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-542822039-172.17.0.2-1732020300446 (Datanode Uuid 7e925ba3-ea8c-4c13-b4d4-2806222a4a22) service to localhost/127.0.0.1:38985 2024-11-19T12:45:05,797 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:45:05,798 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/data/data1/current/BP-542822039-172.17.0.2-1732020300446 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:45:05,798 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/cluster_29a0d4db-def5-2c8e-3cf3-7fb964947374/data/data2/current/BP-542822039-172.17.0.2-1732020300446 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:45:05,798 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:45:05,805 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a400a96{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:45:05,806 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4072566{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:45:05,806 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:45:05,806 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ed1f55a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:45:05,806 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51dee452{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18722424-cd1d-6749-9f51-db6fe4ba27fd/hadoop.log.dir/,STOPPED} 2024-11-19T12:45:05,813 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T12:45:05,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T12:45:05,849 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=148 (was 85) - Thread LEAK? -, OpenFileDescriptor=516 (was 443) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=335 (was 312) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6398 (was 6613)