2024-12-06 08:54:19,319 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-06 08:54:19,333 main DEBUG Took 0.011623 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-06 08:54:19,333 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-06 08:54:19,334 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-06 08:54:19,335 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-06 08:54:19,337 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:54:19,355 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-06 08:54:19,368 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,370 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:54:19,370 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,371 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:54:19,371 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,371 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:54:19,372 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,372 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:54:19,373 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,373 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:54:19,374 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,374 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:54:19,375 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,375 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-06 08:54:19,376 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,376 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:54:19,376 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,377 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:54:19,377 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,377 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:54:19,378 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,378 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:54:19,378 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,379 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:54:19,379 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,379 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-06 08:54:19,381 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:54:19,382 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-06 08:54:19,384 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-06 08:54:19,384 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-06 08:54:19,385 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-06 08:54:19,386 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-06 08:54:19,395 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-06 08:54:19,398 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-06 08:54:19,400 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-06 08:54:19,400 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-06 08:54:19,400 main DEBUG createAppenders(={Console}) 2024-12-06 08:54:19,401 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-06 08:54:19,401 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-06 08:54:19,402 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-06 08:54:19,402 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-06 08:54:19,402 main DEBUG OutputStream closed 2024-12-06 08:54:19,403 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-06 08:54:19,403 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-06 08:54:19,403 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-06 08:54:19,492 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-06 08:54:19,494 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-06 08:54:19,496 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-06 08:54:19,497 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-06 08:54:19,498 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-06 08:54:19,499 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-06 08:54:19,499 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-06 08:54:19,500 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-06 08:54:19,501 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-06 08:54:19,501 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-06 08:54:19,501 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-06 08:54:19,502 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-06 08:54:19,503 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-06 08:54:19,503 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-06 08:54:19,503 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-06 08:54:19,504 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-06 08:54:19,504 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-06 08:54:19,505 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-06 08:54:19,507 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-06 08:54:19,508 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-06 08:54:19,508 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-06 08:54:19,509 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-06T08:54:19,527 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-06 08:54:19,531 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-06 08:54:19,531 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-06T08:54:19,875 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d 2024-12-06T08:54:19,916 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405, deleteOnExit=true 2024-12-06T08:54:19,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/test.cache.data in system properties and HBase conf 2024-12-06T08:54:19,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T08:54:19,920 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/hadoop.log.dir in system properties and HBase conf 2024-12-06T08:54:19,921 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T08:54:19,921 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T08:54:19,922 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-06T08:54:20,055 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-06T08:54:20,188 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T08:54:20,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:54:20,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:54:20,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T08:54:20,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:54:20,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T08:54:20,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T08:54:20,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:54:20,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:54:20,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T08:54:20,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/nfs.dump.dir in system properties and HBase conf 2024-12-06T08:54:20,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/java.io.tmpdir in system properties and HBase conf 2024-12-06T08:54:20,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:54:20,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T08:54:20,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T08:54:21,213 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-06T08:54:21,312 INFO [Time-limited test {}] log.Log(170): Logging initialized @2775ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-06T08:54:21,396 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:54:21,461 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:54:21,490 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:54:21,490 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:54:21,492 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:54:21,510 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:54:21,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:54:21,514 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:54:21,740 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62d6efd9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/java.io.tmpdir/jetty-localhost-38163-hadoop-hdfs-3_4_1-tests_jar-_-any-9164909516678503462/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:54:21,750 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:38163} 2024-12-06T08:54:21,750 INFO [Time-limited test {}] server.Server(415): Started @3214ms 2024-12-06T08:54:22,184 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:54:22,193 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:54:22,194 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:54:22,194 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:54:22,195 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:54:22,196 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3665148e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:54:22,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b3a0659{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:54:22,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b23cf15{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/java.io.tmpdir/jetty-localhost-42957-hadoop-hdfs-3_4_1-tests_jar-_-any-2895371645908914934/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:54:22,319 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@7f9e5902{HTTP/1.1, (http/1.1)}{localhost:42957} 2024-12-06T08:54:22,319 INFO [Time-limited test {}] server.Server(415): Started @3783ms 2024-12-06T08:54:22,380 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:54:22,505 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:54:22,514 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:54:22,519 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:54:22,519 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:54:22,520 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:54:22,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5435fd88{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:54:22,530 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65cd6e19{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:54:22,677 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14402056{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/java.io.tmpdir/jetty-localhost-44035-hadoop-hdfs-3_4_1-tests_jar-_-any-16841831246379948315/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:54:22,678 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e1cb3ec{HTTP/1.1, (http/1.1)}{localhost:44035} 2024-12-06T08:54:22,678 INFO [Time-limited test {}] server.Server(415): Started @4142ms 2024-12-06T08:54:22,681 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:54:22,719 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:54:22,724 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:54:22,727 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:54:22,727 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:54:22,727 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:54:22,728 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@233bb3ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:54:22,729 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@146c020c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:54:22,851 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/data/data2/current/BP-291478311-172.17.0.2-1733475260908/current, will proceed with Du for space computation calculation, 2024-12-06T08:54:22,851 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/data/data1/current/BP-291478311-172.17.0.2-1733475260908/current, will proceed with Du for space computation calculation, 2024-12-06T08:54:22,851 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/data/data3/current/BP-291478311-172.17.0.2-1733475260908/current, will proceed with Du for space computation calculation, 2024-12-06T08:54:22,851 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/data/data4/current/BP-291478311-172.17.0.2-1733475260908/current, will proceed with Du for space computation calculation, 2024-12-06T08:54:22,856 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2a9ecb50{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/java.io.tmpdir/jetty-localhost-41943-hadoop-hdfs-3_4_1-tests_jar-_-any-15605324876656519313/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:54:22,857 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4067fd8a{HTTP/1.1, (http/1.1)}{localhost:41943} 
2024-12-06T08:54:22,857 INFO [Time-limited test {}] server.Server(415): Started @4321ms 2024-12-06T08:54:22,861 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:54:22,898 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:54:22,899 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:54:22,975 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd06041ca7c33bb8a with lease ID 0xfaab4ccc29629168: Processing first storage report for DS-41cd615e-ddf3-441e-837f-3704a1617c48 from datanode DatanodeRegistration(127.0.0.1:32979, datanodeUuid=33a579dd-2608-4ac0-bdc6-1d7cd3f53e32, infoPort=43805, infoSecurePort=0, ipcPort=38231, storageInfo=lv=-57;cid=testClusterID;nsid=1837365889;c=1733475260908) 2024-12-06T08:54:22,977 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd06041ca7c33bb8a with lease ID 0xfaab4ccc29629168: from storage DS-41cd615e-ddf3-441e-837f-3704a1617c48 node DatanodeRegistration(127.0.0.1:32979, datanodeUuid=33a579dd-2608-4ac0-bdc6-1d7cd3f53e32, infoPort=43805, infoSecurePort=0, ipcPort=38231, storageInfo=lv=-57;cid=testClusterID;nsid=1837365889;c=1733475260908), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T08:54:22,977 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b6f2ea72db41dce with lease ID 0xfaab4ccc29629169: Processing first storage report for DS-0d93c57c-7eaa-47bc-b70c-4cf0a19ad55a from datanode DatanodeRegistration(127.0.0.1:43435, datanodeUuid=0f1873dd-1d16-4804-87d4-af54c97f4833, infoPort=40153, infoSecurePort=0, ipcPort=36239, storageInfo=lv=-57;cid=testClusterID;nsid=1837365889;c=1733475260908) 2024-12-06T08:54:22,977 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b6f2ea72db41dce with lease ID 0xfaab4ccc29629169: from storage DS-0d93c57c-7eaa-47bc-b70c-4cf0a19ad55a node DatanodeRegistration(127.0.0.1:43435, datanodeUuid=0f1873dd-1d16-4804-87d4-af54c97f4833, infoPort=40153, infoSecurePort=0, ipcPort=36239, storageInfo=lv=-57;cid=testClusterID;nsid=1837365889;c=1733475260908), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:54:22,977 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd06041ca7c33bb8a with lease ID 0xfaab4ccc29629168: Processing first storage report for DS-919cd0d1-bdd6-4d49-8597-0bf0dbadc5f9 from datanode DatanodeRegistration(127.0.0.1:32979, datanodeUuid=33a579dd-2608-4ac0-bdc6-1d7cd3f53e32, infoPort=43805, infoSecurePort=0, ipcPort=38231, storageInfo=lv=-57;cid=testClusterID;nsid=1837365889;c=1733475260908) 2024-12-06T08:54:22,977 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd06041ca7c33bb8a with lease ID 0xfaab4ccc29629168: from storage DS-919cd0d1-bdd6-4d49-8597-0bf0dbadc5f9 node DatanodeRegistration(127.0.0.1:32979, datanodeUuid=33a579dd-2608-4ac0-bdc6-1d7cd3f53e32, infoPort=43805, infoSecurePort=0, ipcPort=38231, storageInfo=lv=-57;cid=testClusterID;nsid=1837365889;c=1733475260908), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, 
invalidatedBlocks: 0 2024-12-06T08:54:22,978 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b6f2ea72db41dce with lease ID 0xfaab4ccc29629169: Processing first storage report for DS-0aab51b2-51c9-4f5c-98f8-cbca7671dc2e from datanode DatanodeRegistration(127.0.0.1:43435, datanodeUuid=0f1873dd-1d16-4804-87d4-af54c97f4833, infoPort=40153, infoSecurePort=0, ipcPort=36239, storageInfo=lv=-57;cid=testClusterID;nsid=1837365889;c=1733475260908) 2024-12-06T08:54:22,978 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b6f2ea72db41dce with lease ID 0xfaab4ccc29629169: from storage DS-0aab51b2-51c9-4f5c-98f8-cbca7671dc2e node DatanodeRegistration(127.0.0.1:43435, datanodeUuid=0f1873dd-1d16-4804-87d4-af54c97f4833, infoPort=40153, infoSecurePort=0, ipcPort=36239, storageInfo=lv=-57;cid=testClusterID;nsid=1837365889;c=1733475260908), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:54:22,997 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/data/data5/current/BP-291478311-172.17.0.2-1733475260908/current, will proceed with Du for space computation calculation, 2024-12-06T08:54:22,997 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/data/data6/current/BP-291478311-172.17.0.2-1733475260908/current, will proceed with Du for space computation calculation, 2024-12-06T08:54:23,024 WARN [Thread-123 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T08:54:23,032 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x649f36068b49d33e with lease ID 0xfaab4ccc2962916a: Processing first storage report for DS-757c0f5c-5227-42ef-84e9-f08391d8d4ac from datanode DatanodeRegistration(127.0.0.1:40317, datanodeUuid=f60d1d2b-cb74-4250-94d7-087a9597ca1b, infoPort=37687, infoSecurePort=0, ipcPort=46125, storageInfo=lv=-57;cid=testClusterID;nsid=1837365889;c=1733475260908) 2024-12-06T08:54:23,033 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x649f36068b49d33e with lease ID 0xfaab4ccc2962916a: from storage DS-757c0f5c-5227-42ef-84e9-f08391d8d4ac node DatanodeRegistration(127.0.0.1:40317, datanodeUuid=f60d1d2b-cb74-4250-94d7-087a9597ca1b, infoPort=37687, infoSecurePort=0, ipcPort=46125, storageInfo=lv=-57;cid=testClusterID;nsid=1837365889;c=1733475260908), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:54:23,033 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x649f36068b49d33e with lease ID 0xfaab4ccc2962916a: Processing first storage report for DS-7f9f9134-5c99-4f7a-8291-3b13708df185 from datanode DatanodeRegistration(127.0.0.1:40317, datanodeUuid=f60d1d2b-cb74-4250-94d7-087a9597ca1b, infoPort=37687, infoSecurePort=0, ipcPort=46125, storageInfo=lv=-57;cid=testClusterID;nsid=1837365889;c=1733475260908) 2024-12-06T08:54:23,033 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x649f36068b49d33e with lease ID 0xfaab4ccc2962916a: from storage DS-7f9f9134-5c99-4f7a-8291-3b13708df185 node DatanodeRegistration(127.0.0.1:40317, datanodeUuid=f60d1d2b-cb74-4250-94d7-087a9597ca1b, infoPort=37687, infoSecurePort=0, ipcPort=46125, storageInfo=lv=-57;cid=testClusterID;nsid=1837365889;c=1733475260908), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:54:23,317 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d 2024-12-06T08:54:23,393 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-06T08:54:23,450 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=158, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=208, ProcessCount=11, AvailableMemoryMB=9078 2024-12-06T08:54:23,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T08:54:23,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-06T08:54:23,553 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/zookeeper_0, clientPort=51517, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T08:54:23,566 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51517 2024-12-06T08:54:23,579 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:23,582 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:23,712 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:23,712 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:23,771 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-502423260_22 at /127.0.0.1:56276 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:40317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56276 dst: /127.0.0.1:40317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:23,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-06T08:54:24,193 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T08:54:24,205 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9 with version=8 2024-12-06T08:54:24,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/hbase-staging 2024-12-06T08:54:24,302 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-06T08:54:24,558 INFO [Time-limited test {}] client.ConnectionUtils(128): master/25494438c68b:0 server-side Connection retries=45 2024-12-06T08:54:24,570 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:24,571 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:24,577 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:54:24,577 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:24,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:54:24,733 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-06T08:54:24,796 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-06T08:54:24,806 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-06T08:54:24,810 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:54:24,838 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 12852 (auto-detected) 2024-12-06T08:54:24,839 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-06T08:54:24,858 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45419 2024-12-06T08:54:24,880 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45419 connecting to ZooKeeper ensemble=127.0.0.1:51517 2024-12-06T08:54:24,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:454190x0, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:54:24,915 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45419-0x10066891d110000 connected 2024-12-06T08:54:24,944 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:24,947 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:24,958 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:24,963 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9, hbase.cluster.distributed=false 2024-12-06T08:54:24,992 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:54:24,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45419 2024-12-06T08:54:24,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45419 2024-12-06T08:54:25,000 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45419 2024-12-06T08:54:25,003 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45419 2024-12-06T08:54:25,003 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45419 2024-12-06T08:54:25,123 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/25494438c68b:0 server-side Connection retries=45 2024-12-06T08:54:25,125 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:25,126 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:25,126 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:54:25,126 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:25,126 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:54:25,130 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:54:25,132 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:54:25,133 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34279 2024-12-06T08:54:25,135 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34279 connecting to ZooKeeper ensemble=127.0.0.1:51517 2024-12-06T08:54:25,137 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:25,140 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:25,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:342790x0, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:54:25,148 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34279-0x10066891d110001 connected 2024-12-06T08:54:25,148 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:25,153 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T08:54:25,162 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:54:25,165 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:54:25,172 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:54:25,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34279 2024-12-06T08:54:25,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=34279 2024-12-06T08:54:25,178 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34279 2024-12-06T08:54:25,179 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34279 2024-12-06T08:54:25,179 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34279 2024-12-06T08:54:25,198 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/25494438c68b:0 server-side Connection retries=45 2024-12-06T08:54:25,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:25,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:25,199 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:54:25,199 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:25,199 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:54:25,199 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:54:25,200 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:54:25,202 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35585 2024-12-06T08:54:25,203 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35585 connecting to ZooKeeper ensemble=127.0.0.1:51517 2024-12-06T08:54:25,205 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:25,208 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:25,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:355850x0, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:54:25,214 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35585-0x10066891d110002 connected 2024-12-06T08:54:25,215 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:25,215 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-12-06T08:54:25,216 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:54:25,217 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:54:25,219 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:54:25,220 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35585 2024-12-06T08:54:25,220 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35585 2024-12-06T08:54:25,221 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35585 2024-12-06T08:54:25,221 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35585 2024-12-06T08:54:25,222 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35585 2024-12-06T08:54:25,243 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/25494438c68b:0 server-side Connection retries=45 2024-12-06T08:54:25,243 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:25,243 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:25,243 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:54:25,244 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:25,244 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:54:25,244 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:54:25,245 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:54:25,246 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37829 2024-12-06T08:54:25,248 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37829 connecting to ZooKeeper ensemble=127.0.0.1:51517 2024-12-06T08:54:25,250 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:25,254 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:25,260 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:378290x0, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:54:25,261 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37829-0x10066891d110003 connected 2024-12-06T08:54:25,261 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:25,262 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T08:54:25,263 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:54:25,264 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:54:25,267 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:54:25,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37829 2024-12-06T08:54:25,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37829 2024-12-06T08:54:25,273 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37829 2024-12-06T08:54:25,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37829 2024-12-06T08:54:25,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37829 2024-12-06T08:54:25,292 DEBUG [M:0;25494438c68b:45419 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;25494438c68b:45419 2024-12-06T08:54:25,293 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/25494438c68b,45419,1733475264356 2024-12-06T08:54:25,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:25,301 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:25,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-06T08:54:25,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:25,304 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/25494438c68b,45419,1733475264356 2024-12-06T08:54:25,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:54:25,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:54:25,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:25,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:25,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:25,327 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:54:25,327 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:25,328 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:54:25,330 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/25494438c68b,45419,1733475264356 from backup master directory 2024-12-06T08:54:25,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:25,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:25,333 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-06T08:54:25,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/25494438c68b,45419,1733475264356 2024-12-06T08:54:25,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:25,334 WARN [master/25494438c68b:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T08:54:25,335 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=25494438c68b,45419,1733475264356 2024-12-06T08:54:25,337 INFO [master/25494438c68b:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-06T08:54:25,339 INFO [master/25494438c68b:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-06T08:54:25,410 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/hbase.id] with ID: 6f6c7ab0-c897-44d6-81df-33df81a36918 2024-12-06T08:54:25,410 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/.tmp/hbase.id 2024-12-06T08:54:25,417 WARN [master/25494438c68b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:25,418 WARN [master/25494438c68b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:25,422 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-502423260_22 at /127.0.0.1:56316 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:40317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56316 dst: /127.0.0.1:40317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:25,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-06T08:54:25,430 WARN [master/25494438c68b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T08:54:25,430 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/.tmp/hbase.id]:[hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/hbase.id] 2024-12-06T08:54:25,478 INFO [master/25494438c68b:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:25,484 INFO [master/25494438c68b:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-06T08:54:25,504 INFO [master/25494438c68b:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-12-06T08:54:25,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:25,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:25,509 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:25,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:25,523 WARN [master/25494438c68b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:25,523 WARN [master/25494438c68b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:25,530 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-502423260_22 at /127.0.0.1:59320 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:43435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59320 dst: /127.0.0.1:43435 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:25,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-06T08:54:25,537 WARN [master/25494438c68b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-06T08:54:25,555 INFO [master/25494438c68b:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:54:25,557 INFO [master/25494438c68b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T08:54:25,565 INFO [master/25494438c68b:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T08:54:25,598 WARN [master/25494438c68b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:25,599 WARN [master/25494438c68b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:25,603 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-502423260_22 at /127.0.0.1:45144 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:32979:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45144 dst: /127.0.0.1:32979 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:25,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-06T08:54:25,610 WARN [master/25494438c68b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T08:54:25,629 INFO [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store 2024-12-06T08:54:25,647 WARN [master/25494438c68b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:25,648 WARN [master/25494438c68b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:25,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-502423260_22 at /127.0.0.1:45160 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:32979:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45160 dst: /127.0.0.1:32979 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:25,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-06T08:54:25,657 WARN [master/25494438c68b:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T08:54:25,662 INFO [master/25494438c68b:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-06T08:54:25,665 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:54:25,666 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:54:25,666 INFO [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:25,666 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:25,668 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-06T08:54:25,668 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:25,668 INFO [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:25,669 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733475265666Disabling compacts and flushes for region at 1733475265666Disabling writes for close at 1733475265668 (+2 ms)Writing region close event to WAL at 1733475265668Closed at 1733475265668 2024-12-06T08:54:25,671 WARN [master/25494438c68b:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/.initializing 2024-12-06T08:54:25,672 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/WALs/25494438c68b,45419,1733475264356 2024-12-06T08:54:25,681 INFO [master/25494438c68b:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-06T08:54:25,697 INFO [master/25494438c68b:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=25494438c68b%2C45419%2C1733475264356, suffix=, logDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/WALs/25494438c68b,45419,1733475264356, archiveDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/oldWALs, maxLogs=10 2024-12-06T08:54:25,729 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/WALs/25494438c68b,45419,1733475264356/25494438c68b%2C45419%2C1733475264356.1733475265701, exclude list is [], retry=0 2024-12-06T08:54:25,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:25,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43435,DS-0d93c57c-7eaa-47bc-b70c-4cf0a19ad55a,DISK] 2024-12-06T08:54:25,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40317,DS-757c0f5c-5227-42ef-84e9-f08391d8d4ac,DISK] 2024-12-06T08:54:25,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32979,DS-41cd615e-ddf3-441e-837f-3704a1617c48,DISK] 2024-12-06T08:54:25,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-06T08:54:25,797 INFO [master/25494438c68b:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/WALs/25494438c68b,45419,1733475264356/25494438c68b%2C45419%2C1733475264356.1733475265701 2024-12-06T08:54:25,798 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37687:37687),(127.0.0.1/127.0.0.1:40153:40153),(127.0.0.1/127.0.0.1:43805:43805)] 2024-12-06T08:54:25,799 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:54:25,799 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:54:25,802 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:25,803 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:25,841 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:25,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T08:54:25,871 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:25,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:25,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:25,878 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T08:54:25,879 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:25,880 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:54:25,880 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:25,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T08:54:25,883 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:25,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:54:25,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:25,888 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T08:54:25,888 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:25,889 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:54:25,890 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:25,893 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:25,894 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:25,900 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:25,900 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:25,904 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T08:54:25,908 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:25,915 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:54:25,916 INFO [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60185998, jitterRate=-0.10315874218940735}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T08:54:25,922 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733475265816Initializing all the Stores at 1733475265818 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475265819 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733475265819Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733475265820 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733475265820Cleaning up temporary data from old regions at 1733475265900 (+80 ms)Region opened successfully at 1733475265922 (+22 ms) 2024-12-06T08:54:25,923 INFO [master/25494438c68b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T08:54:25,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-06T08:54:25,955 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-06T08:54:25,972 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7434b4bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=25494438c68b/172.17.0.2:0 2024-12-06T08:54:26,013 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-06T08:54:26,025 INFO [master/25494438c68b:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T08:54:26,025 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T08:54:26,028 INFO [master/25494438c68b:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T08:54:26,030 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-06T08:54:26,035 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-06T08:54:26,035 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T08:54:26,061 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-06T08:54:26,070 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T08:54:26,073 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-06T08:54:26,075 INFO [master/25494438c68b:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T08:54:26,077 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T08:54:26,079 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-06T08:54:26,081 INFO [master/25494438c68b:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T08:54:26,085 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T08:54:26,086 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-06T08:54:26,088 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T08:54:26,090 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T08:54:26,107 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T08:54:26,109 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T08:54:26,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:26,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:26,114 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:26,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-06T08:54:26,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,114 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,117 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=25494438c68b,45419,1733475264356, sessionid=0x10066891d110000, setting cluster-up flag (Was=false) 2024-12-06T08:54:26,132 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,139 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T08:54:26,142 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=25494438c68b,45419,1733475264356 2024-12-06T08:54:26,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,149 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,156 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T08:54:26,158 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=25494438c68b,45419,1733475264356 2024-12-06T08:54:26,166 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-06T08:54:26,181 INFO [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(746): ClusterId : 6f6c7ab0-c897-44d6-81df-33df81a36918 2024-12-06T08:54:26,181 INFO [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(746): ClusterId : 6f6c7ab0-c897-44d6-81df-33df81a36918 2024-12-06T08:54:26,181 INFO [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(746): ClusterId : 6f6c7ab0-c897-44d6-81df-33df81a36918 2024-12-06T08:54:26,184 DEBUG [RS:1;25494438c68b:35585 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T08:54:26,184 DEBUG [RS:0;25494438c68b:34279 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T08:54:26,184 DEBUG [RS:2;25494438c68b:37829 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T08:54:26,190 DEBUG [RS:2;25494438c68b:37829 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T08:54:26,190 DEBUG [RS:0;25494438c68b:34279 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T08:54:26,190 DEBUG [RS:1;25494438c68b:35585 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T08:54:26,190 DEBUG [RS:2;25494438c68b:37829 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T08:54:26,190 DEBUG [RS:0;25494438c68b:34279 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T08:54:26,190 DEBUG [RS:1;25494438c68b:35585 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T08:54:26,195 DEBUG [RS:2;25494438c68b:37829 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T08:54:26,195 DEBUG [RS:1;25494438c68b:35585 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T08:54:26,196 DEBUG [RS:0;25494438c68b:34279 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T08:54:26,196 DEBUG [RS:2;25494438c68b:37829 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c488251, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=25494438c68b/172.17.0.2:0 2024-12-06T08:54:26,196 DEBUG [RS:1;25494438c68b:35585 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33128d4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=25494438c68b/172.17.0.2:0 2024-12-06T08:54:26,196 DEBUG [RS:0;25494438c68b:34279 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b5f546c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=25494438c68b/172.17.0.2:0 2024-12-06T08:54:26,218 DEBUG [RS:0;25494438c68b:34279 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;25494438c68b:34279 2024-12-06T08:54:26,220 DEBUG [RS:2;25494438c68b:37829 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;25494438c68b:37829 2024-12-06T08:54:26,221 DEBUG [RS:1;25494438c68b:35585 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;25494438c68b:35585 2024-12-06T08:54:26,223 INFO [RS:2;25494438c68b:37829 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T08:54:26,223 INFO [RS:0;25494438c68b:34279 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T08:54:26,223 INFO [RS:1;25494438c68b:35585 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T08:54:26,223 INFO [RS:2;25494438c68b:37829 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T08:54:26,223 INFO [RS:1;25494438c68b:35585 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T08:54:26,223 INFO [RS:0;25494438c68b:34279 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T08:54:26,223 DEBUG [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-06T08:54:26,223 DEBUG [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-06T08:54:26,223 DEBUG [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(832): About to register with Master. 
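The entries above show every ZKWatcher session receiving NodeChildrenChanged for /hbase as the master publishes its startup znodes (/hbase/running, the flush-table-proc and online-snapshot trees). ZooKeeper watches are one-shot, so a client that wants to keep seeing these events has to re-arm the watch each time it fires, typically by re-reading the children. A minimal sketch of that pattern with the plain ZooKeeper client API; the quorum address and znode path are taken from the log, and the callback body is illustrative only:

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    import java.util.List;

    public class ChildWatchSketch {
        public static void main(String[] args) throws Exception {
            // Quorum as reported by the ZKWatcher lines above.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:51517", 30_000, event -> { });

            Watcher childWatcher = new Watcher() {
                @Override
                public void process(WatchedEvent event) {
                    if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
                        try {
                            // Watches are one-shot: re-arm by listing the children again.
                            List<String> children = zk.getChildren("/hbase", this);
                            System.out.println("children of /hbase changed: " + children);
                        } catch (KeeperException | InterruptedException e) {
                            e.printStackTrace();
                        }
                    }
                }
            };

            zk.getChildren("/hbase", childWatcher); // initial registration
            Thread.sleep(Long.MAX_VALUE);           // keep the session open
        }
    }

HBase's ZKWatcher re-arms its watches the same way internally, which is why each child change appears once per connected session (master plus three region servers) in the log above.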
2024-12-06T08:54:26,226 INFO [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(2659): reportForDuty to master=25494438c68b,45419,1733475264356 with port=34279, startcode=1733475265079 2024-12-06T08:54:26,226 INFO [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(2659): reportForDuty to master=25494438c68b,45419,1733475264356 with port=37829, startcode=1733475265242 2024-12-06T08:54:26,226 INFO [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(2659): reportForDuty to master=25494438c68b,45419,1733475264356 with port=35585, startcode=1733475265197 2024-12-06T08:54:26,241 DEBUG [RS:0;25494438c68b:34279 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:54:26,241 DEBUG [RS:2;25494438c68b:37829 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:54:26,241 DEBUG [RS:1;25494438c68b:35585 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:54:26,261 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-06T08:54:26,273 INFO [master/25494438c68b:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-06T08:54:26,282 INFO [master/25494438c68b:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T08:54:26,285 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33479, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:54:26,285 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50839, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:54:26,285 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35495, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:54:26,292 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45419 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-06T08:54:26,289 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 25494438c68b,45419,1733475264356 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T08:54:26,298 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/25494438c68b:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:54:26,298 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45419 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-06T08:54:26,298 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/25494438c68b:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:54:26,298 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/25494438c68b:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:54:26,298 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/25494438c68b:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:54:26,298 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/25494438c68b:0, corePoolSize=10, maxPoolSize=10 2024-12-06T08:54:26,299 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,299 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] 
executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/25494438c68b:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:54:26,299 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45419 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-06T08:54:26,299 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,305 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:54:26,306 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-06T08:54:26,311 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733475296311 2024-12-06T08:54:26,312 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:26,313 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T08:54:26,312 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:54:26,314 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T08:54:26,318 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T08:54:26,318 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T08:54:26,319 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T08:54:26,319 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T08:54:26,327 DEBUG [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-06T08:54:26,327 DEBUG [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-06T08:54:26,327 DEBUG [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-06T08:54:26,327 WARN [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-06T08:54:26,327 WARN [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-06T08:54:26,327 WARN [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-06T08:54:26,327 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,332 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T08:54:26,332 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:26,332 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
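The two DFSStripedOutputStream warnings just above come from writing an erasure-coded file under the RS-3-2-1024k policy on a mini cluster with a single datanode: three data plus two parity blocks need five distinct locations, so the parity blocks (index 3 and 4) cannot be placed. The log already suggests running `hdfs ec -verifyClusterSetup`; the policy in effect for a path can also be inspected programmatically through DistributedFileSystem. A hedged sketch, with the NameNode URI taken from the log and the path shortened for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    import java.net.URI;

    public class EcPolicySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // NameNode address as reported in the log above.
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37087"), conf);
            DistributedFileSystem dfs = (DistributedFileSystem) fs;

            Path dir = new Path("/user/jenkins/test-data");
            // May be null when the path uses plain replication rather than erasure coding.
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
            System.out.println(dir + " -> " + (policy == null ? "replication" : policy.getName()));
        }
    }

A single-datanode test cluster simply cannot satisfy RS-3-2; as the later entries show, the writes still complete but with missing parity blocks, hence the "high risk of losing data" warnings rather than a hard failure.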
2024-12-06T08:54:26,333 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T08:54:26,334 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T08:54:26,336 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-502423260_22 at /127.0.0.1:56342 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:40317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56342 dst: /127.0.0.1:40317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:26,336 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T08:54:26,337 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T08:54:26,339 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/25494438c68b:0:becomeActiveMaster-HFileCleaner.large.0-1733475266338,5,FailOnTimeoutGroup] 2024-12-06T08:54:26,340 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/25494438c68b:0:becomeActiveMaster-HFileCleaner.small.0-1733475266339,5,FailOnTimeoutGroup] 2024-12-06T08:54:26,340 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,340 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T08:54:26,341 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
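Above, the active master registers its cleaner chores (TimeToLiveLogCleaner, ReplicationLogCleaner, HFileLinkCleaner, SnapshotHFileCleaner, ...) and schedules them through the ChoreService at fixed periods, e.g. LogsCleaner every 600000 ms. As a rough analogy only, not the HBase ChoreService API itself, the same "named periodic task" pattern looks like this with a plain ScheduledExecutorService (the task body is illustrative):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class CleanerChoreSketch {
        public static void main(String[] args) {
            // One worker thread, named after the chore it runs.
            ScheduledExecutorService chorePool =
                Executors.newSingleThreadScheduledExecutor(r -> new Thread(r, "LogsCleaner"));

            Runnable logsCleaner = () -> {
                // A real cleaner chore walks the old-WALs/archive directory and asks its
                // configured delegates whether each file may be deleted; here we only log.
                System.out.println("LogsCleaner pass at " + System.currentTimeMillis());
            };

            // Period matches the "period=600000, unit=MILLISECONDS" chore in the log.
            chorePool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);
            // Runs until the pool is shut down externally.
        }
    }
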
2024-12-06T08:54:26,342 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-06T08:54:26,346 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T08:54:26,348 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-06T08:54:26,348 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9 2024-12-06T08:54:26,354 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:26,354 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
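The descriptor printed above for hbase:meta (families info, ns, rep_barrier and table, with ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory column families and 8 KB data blocks, 64 KB for rep_barrier) is written by FSTableDescriptors as part of InitMetaProcedure. The same shape can be expressed with the public client API; a minimal sketch for an ordinary user table with one comparable family (the table and family names here are made up, and this is not how hbase:meta itself is bootstrapped):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static void main(String[] args) {
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setInMemory(true)                                   // IN_MEMORY => 'true'
                .setBlocksize(8192)                                  // BLOCKSIZE => 8 KB
                .build();

            TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example_table"))
                .setColumnFamily(info)
                .build();

            System.out.println(table);
        }
    }

Passing the resulting TableDescriptor to Admin.createTable(...) on an open connection would create such a table once the cluster has finished starting.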
2024-12-06T08:54:26,357 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-502423260_22 at /127.0.0.1:56368 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:40317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56368 dst: /127.0.0.1:40317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:26,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-06T08:54:26,363 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
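The two DataXceiver errors appear to be the datanode-side counterpart of the same failed striped writes: when the client gives up on a block in the group it closes the connection mid-packet, and the receiving datanode's IOUtils.readFully sees the stream end before the promised bytes arrive, reported as "Premature EOF from inputStream". The condition is easy to reproduce in isolation by asking readFully for more bytes than the stream will ever deliver (the stream contents below are made up):

    import org.apache.hadoop.io.IOUtils;

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class PrematureEofDemo {
        public static void main(String[] args) {
            // Pretend the peer promised an 8-byte packet header but hung up after 3 bytes.
            InputStream in = new ByteArrayInputStream(new byte[] {1, 2, 3});
            byte[] header = new byte[8];
            try {
                IOUtils.readFully(in, header, 0, header.length);
            } catch (IOException e) {
                // Same condition the DataXceiver logs above: the stream ended early.
                System.out.println("caught: " + e.getMessage());
            }
        }
    }
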
2024-12-06T08:54:26,365 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:54:26,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:54:26,369 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:54:26,370 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:26,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:26,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T08:54:26,373 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T08:54:26,373 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:26,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:26,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:54:26,377 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:54:26,377 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:26,378 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:26,378 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:54:26,380 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:54:26,380 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:26,381 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:26,382 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T08:54:26,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740 2024-12-06T08:54:26,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740 2024-12-06T08:54:26,386 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-12-06T08:54:26,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T08:54:26,387 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T08:54:26,390 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T08:54:26,399 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:54:26,400 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70848601, jitterRate=0.05572642385959625}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T08:54:26,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733475266365Initializing all the Stores at 1733475266366 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475266367 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475266367Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733475266367Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475266367Cleaning up temporary data from old regions at 1733475266387 (+20 ms)Region opened successfully at 1733475266403 (+16 ms) 2024-12-06T08:54:26,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:54:26,403 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T08:54:26,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T08:54:26,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:54:26,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-12-06T08:54:26,405 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T08:54:26,405 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733475266403Disabling compacts and flushes for region at 1733475266403Disabling writes for close at 1733475266403Writing region close event to WAL at 1733475266404 (+1 ms)Closed at 1733475266405 (+1 ms) 2024-12-06T08:54:26,408 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:54:26,409 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-06T08:54:26,415 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T08:54:26,424 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T08:54:26,428 INFO [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(2659): reportForDuty to master=25494438c68b,45419,1733475264356 with port=37829, startcode=1733475265242 2024-12-06T08:54:26,428 INFO [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(2659): reportForDuty to master=25494438c68b,45419,1733475264356 with port=35585, startcode=1733475265197 2024-12-06T08:54:26,429 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T08:54:26,430 INFO [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(2659): reportForDuty to master=25494438c68b,45419,1733475264356 with port=34279, startcode=1733475265079 2024-12-06T08:54:26,431 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45419 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 25494438c68b,37829,1733475265242 2024-12-06T08:54:26,433 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45419 {}] master.ServerManager(517): Registering regionserver=25494438c68b,37829,1733475265242 2024-12-06T08:54:26,441 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45419 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 25494438c68b,35585,1733475265197 2024-12-06T08:54:26,441 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45419 {}] master.ServerManager(517): Registering regionserver=25494438c68b,35585,1733475265197 2024-12-06T08:54:26,442 DEBUG [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9 2024-12-06T08:54:26,442 DEBUG [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37087 2024-12-06T08:54:26,442 DEBUG [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T08:54:26,444 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45419 {}] 
master.ServerManager(363): Checking decommissioned status of RegionServer 25494438c68b,34279,1733475265079 2024-12-06T08:54:26,444 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45419 {}] master.ServerManager(517): Registering regionserver=25494438c68b,34279,1733475265079 2024-12-06T08:54:26,444 DEBUG [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9 2024-12-06T08:54:26,444 DEBUG [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37087 2024-12-06T08:54:26,445 DEBUG [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T08:54:26,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:54:26,447 DEBUG [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9 2024-12-06T08:54:26,447 DEBUG [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37087 2024-12-06T08:54:26,447 DEBUG [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T08:54:26,452 DEBUG [RS:2;25494438c68b:37829 {}] zookeeper.ZKUtil(111): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/25494438c68b,37829,1733475265242 2024-12-06T08:54:26,453 WARN [RS:2;25494438c68b:37829 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T08:54:26,453 INFO [RS:2;25494438c68b:37829 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T08:54:26,453 DEBUG [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,37829,1733475265242 2024-12-06T08:54:26,454 DEBUG [RS:1;25494438c68b:35585 {}] zookeeper.ZKUtil(111): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/25494438c68b,35585,1733475265197 2024-12-06T08:54:26,454 WARN [RS:1;25494438c68b:35585 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
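Each region server above registers itself by creating, then watching, its own ephemeral znode under /hbase/rs/<host>,<port>,<startcode>; the master's RegionServerTracker picks the servers up from the resulting child events ("RegionServer ephemeral node created" a few lines below). Because the znode is ephemeral it vanishes as soon as the region server's ZooKeeper session dies, so the HBASE_ZNODE_FILE warning is only about recovery speed (MTTR), not correctness. A hedged sketch of the underlying ZooKeeper pattern, with a made-up server name:

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralRegistration {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:51517", 30_000, event -> { });

            // Illustrative server name in the host,port,startcode form used by HBase.
            String znode = "/hbase/rs/example-host,16020,1733475265079";

            // EPHEMERAL: the znode is deleted automatically when this session expires,
            // which is how the master learns that a region server has gone away.
            zk.create(znode, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

            System.out.println("registered " + znode);
            Thread.sleep(Long.MAX_VALUE); // keep the session (and the znode) alive
        }
    }
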
2024-12-06T08:54:26,454 INFO [RS:1;25494438c68b:35585 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T08:54:26,455 DEBUG [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,35585,1733475265197 2024-12-06T08:54:26,455 DEBUG [RS:0;25494438c68b:34279 {}] zookeeper.ZKUtil(111): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/25494438c68b,34279,1733475265079 2024-12-06T08:54:26,455 WARN [RS:0;25494438c68b:34279 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T08:54:26,455 INFO [RS:0;25494438c68b:34279 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T08:54:26,455 DEBUG [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,34279,1733475265079 2024-12-06T08:54:26,456 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [25494438c68b,35585,1733475265197] 2024-12-06T08:54:26,456 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [25494438c68b,34279,1733475265079] 2024-12-06T08:54:26,456 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [25494438c68b,37829,1733475265242] 2024-12-06T08:54:26,484 INFO [RS:1;25494438c68b:35585 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:54:26,484 INFO [RS:0;25494438c68b:34279 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:54:26,484 INFO [RS:2;25494438c68b:37829 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:54:26,502 INFO [RS:1;25494438c68b:35585 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:54:26,502 INFO [RS:0;25494438c68b:34279 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:54:26,502 INFO [RS:2;25494438c68b:37829 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:54:26,509 INFO [RS:1;25494438c68b:35585 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:54:26,509 INFO [RS:0;25494438c68b:34279 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:54:26,509 INFO [RS:2;25494438c68b:37829 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:54:26,509 INFO [RS:0;25494438c68b:34279 {}] 
hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,509 INFO [RS:1;25494438c68b:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,509 INFO [RS:2;25494438c68b:37829 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,510 INFO [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T08:54:26,510 INFO [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T08:54:26,512 INFO [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T08:54:26,516 INFO [RS:2;25494438c68b:37829 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T08:54:26,516 INFO [RS:0;25494438c68b:34279 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T08:54:26,516 INFO [RS:1;25494438c68b:35585 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T08:54:26,518 INFO [RS:0;25494438c68b:34279 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,518 INFO [RS:2;25494438c68b:37829 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,518 INFO [RS:1;25494438c68b:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T08:54:26,518 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,518 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,518 DEBUG [RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/25494438c68b:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:54:26,519 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/25494438c68b:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:54:26,519 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/25494438c68b:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:54:26,519 DEBUG 
[RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,519 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,520 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,521 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting 
executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/25494438c68b:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:54:26,521 DEBUG [RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/25494438c68b:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:54:26,521 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:26,521 DEBUG [RS:2;25494438c68b:37829 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:54:26,521 DEBUG [RS:0;25494438c68b:34279 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:54:26,521 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/25494438c68b:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:54:26,521 DEBUG [RS:1;25494438c68b:35585 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:54:26,522 INFO [RS:0;25494438c68b:34279 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,522 INFO [RS:2;25494438c68b:37829 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,522 INFO [RS:1;25494438c68b:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,522 INFO [RS:0;25494438c68b:34279 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,522 INFO [RS:2;25494438c68b:37829 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,522 INFO [RS:1;25494438c68b:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,522 INFO [RS:0;25494438c68b:34279 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,523 INFO [RS:2;25494438c68b:37829 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,523 INFO [RS:1;25494438c68b:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,523 INFO [RS:0;25494438c68b:34279 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,523 INFO [RS:1;25494438c68b:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,523 INFO [RS:2;25494438c68b:37829 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,523 INFO [RS:0;25494438c68b:34279 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
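The block of entries above shows each region server starting a family of named executor services (RS_OPEN_REGION, RS_CLOSE_META, RS_LOG_REPLAY_OPS, RS_FLUSH_OPERATIONS, ...) with matching core and max pool sizes, so every operation type gets its own bounded thread pool. The equivalent construction with plain java.util.concurrent, borrowing one pool name and its sizes from the log purely as an illustration:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class NamedPoolSketch {
        public static void main(String[] args) {
            // corePoolSize=3, maxPoolSize=3, mirroring RS_FLUSH_OPERATIONS above.
            ThreadPoolExecutor flushOps = new ThreadPoolExecutor(
                3, 3, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
                r -> new Thread(r, "RS_FLUSH_OPERATIONS-example"));
            flushOps.allowCoreThreadTimeOut(true);

            for (int i = 0; i < 5; i++) {
                final int task = i;
                flushOps.execute(() ->
                    System.out.println(Thread.currentThread().getName() + " ran flush task " + task));
            }
            flushOps.shutdown();
        }
    }

Keeping the pools separate means a burst of one operation type (say, log replay) cannot starve the threads needed to open or close regions.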
2024-12-06T08:54:26,523 INFO [RS:1;25494438c68b:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,523 INFO [RS:2;25494438c68b:37829 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,523 INFO [RS:0;25494438c68b:34279 {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,34279,1733475265079-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:54:26,523 INFO [RS:1;25494438c68b:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,35585,1733475265197-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:54:26,523 INFO [RS:2;25494438c68b:37829 {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,37829,1733475265242-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:54:26,544 INFO [RS:1;25494438c68b:35585 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:54:26,546 INFO [RS:1;25494438c68b:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,35585,1733475265197-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,546 INFO [RS:1;25494438c68b:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,546 INFO [RS:1;25494438c68b:35585 {}] regionserver.Replication(171): 25494438c68b,35585,1733475265197 started 2024-12-06T08:54:26,547 INFO [RS:0;25494438c68b:34279 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:54:26,547 INFO [RS:2;25494438c68b:37829 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:54:26,548 INFO [RS:0;25494438c68b:34279 {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,34279,1733475265079-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,548 INFO [RS:2;25494438c68b:37829 {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,37829,1733475265242-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,548 INFO [RS:2;25494438c68b:37829 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,548 INFO [RS:2;25494438c68b:37829 {}] regionserver.Replication(171): 25494438c68b,37829,1733475265242 started 2024-12-06T08:54:26,548 INFO [RS:0;25494438c68b:34279 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,548 INFO [RS:0;25494438c68b:34279 {}] regionserver.Replication(171): 25494438c68b,34279,1733475265079 started 2024-12-06T08:54:26,565 INFO [RS:1;25494438c68b:35585 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T08:54:26,566 INFO [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(1482): Serving as 25494438c68b,35585,1733475265197, RpcServer on 25494438c68b/172.17.0.2:35585, sessionid=0x10066891d110002 2024-12-06T08:54:26,567 DEBUG [RS:1;25494438c68b:35585 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:54:26,567 DEBUG [RS:1;25494438c68b:35585 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 25494438c68b,35585,1733475265197 2024-12-06T08:54:26,567 DEBUG [RS:1;25494438c68b:35585 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '25494438c68b,35585,1733475265197' 2024-12-06T08:54:26,567 DEBUG [RS:1;25494438c68b:35585 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:54:26,568 DEBUG [RS:1;25494438c68b:35585 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:54:26,569 DEBUG [RS:1;25494438c68b:35585 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:54:26,569 DEBUG [RS:1;25494438c68b:35585 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:54:26,569 DEBUG [RS:1;25494438c68b:35585 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 25494438c68b,35585,1733475265197 2024-12-06T08:54:26,569 DEBUG [RS:1;25494438c68b:35585 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '25494438c68b,35585,1733475265197' 2024-12-06T08:54:26,569 DEBUG [RS:1;25494438c68b:35585 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:54:26,570 DEBUG [RS:1;25494438c68b:35585 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:54:26,570 DEBUG [RS:1;25494438c68b:35585 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:54:26,570 INFO [RS:1;25494438c68b:35585 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:54:26,570 INFO [RS:1;25494438c68b:35585 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T08:54:26,572 INFO [RS:2;25494438c68b:37829 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:26,572 INFO [RS:0;25494438c68b:34279 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T08:54:26,572 INFO [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(1482): Serving as 25494438c68b,37829,1733475265242, RpcServer on 25494438c68b/172.17.0.2:37829, sessionid=0x10066891d110003 2024-12-06T08:54:26,572 INFO [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(1482): Serving as 25494438c68b,34279,1733475265079, RpcServer on 25494438c68b/172.17.0.2:34279, sessionid=0x10066891d110001 2024-12-06T08:54:26,573 DEBUG [RS:2;25494438c68b:37829 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:54:26,573 DEBUG [RS:0;25494438c68b:34279 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:54:26,573 DEBUG [RS:2;25494438c68b:37829 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 25494438c68b,37829,1733475265242 2024-12-06T08:54:26,573 DEBUG [RS:0;25494438c68b:34279 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 25494438c68b,34279,1733475265079 2024-12-06T08:54:26,573 DEBUG [RS:2;25494438c68b:37829 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '25494438c68b,37829,1733475265242' 2024-12-06T08:54:26,573 DEBUG [RS:0;25494438c68b:34279 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '25494438c68b,34279,1733475265079' 2024-12-06T08:54:26,573 DEBUG [RS:2;25494438c68b:37829 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:54:26,573 DEBUG [RS:0;25494438c68b:34279 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:54:26,573 DEBUG [RS:2;25494438c68b:37829 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:54:26,573 DEBUG [RS:0;25494438c68b:34279 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:54:26,574 DEBUG [RS:2;25494438c68b:37829 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:54:26,574 DEBUG [RS:0;25494438c68b:34279 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:54:26,574 DEBUG [RS:2;25494438c68b:37829 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:54:26,574 DEBUG [RS:0;25494438c68b:34279 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:54:26,574 DEBUG [RS:2;25494438c68b:37829 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 25494438c68b,37829,1733475265242 2024-12-06T08:54:26,574 DEBUG [RS:0;25494438c68b:34279 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 25494438c68b,34279,1733475265079 2024-12-06T08:54:26,574 DEBUG [RS:2;25494438c68b:37829 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '25494438c68b,37829,1733475265242' 2024-12-06T08:54:26,574 DEBUG [RS:0;25494438c68b:34279 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '25494438c68b,34279,1733475265079' 2024-12-06T08:54:26,574 DEBUG [RS:2;25494438c68b:37829 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:54:26,574 DEBUG [RS:0;25494438c68b:34279 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:54:26,575 DEBUG [RS:2;25494438c68b:37829 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:54:26,575 DEBUG [RS:0;25494438c68b:34279 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:54:26,576 DEBUG [RS:2;25494438c68b:37829 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:54:26,576 INFO [RS:2;25494438c68b:37829 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:54:26,576 DEBUG [RS:0;25494438c68b:34279 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:54:26,576 INFO [RS:2;25494438c68b:37829 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T08:54:26,576 INFO [RS:0;25494438c68b:34279 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:54:26,576 INFO [RS:0;25494438c68b:34279 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T08:54:26,580 WARN [25494438c68b:45419 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-06T08:54:26,676 INFO [RS:1;25494438c68b:35585 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-06T08:54:26,677 INFO [RS:2;25494438c68b:37829 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-06T08:54:26,677 INFO [RS:0;25494438c68b:34279 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-06T08:54:26,680 INFO [RS:1;25494438c68b:35585 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=25494438c68b%2C35585%2C1733475265197, suffix=, logDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,35585,1733475265197, archiveDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/oldWALs, maxLogs=32 2024-12-06T08:54:26,680 INFO [RS:2;25494438c68b:37829 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=25494438c68b%2C37829%2C1733475265242, suffix=, logDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,37829,1733475265242, archiveDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/oldWALs, maxLogs=32 2024-12-06T08:54:26,680 INFO [RS:0;25494438c68b:34279 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=25494438c68b%2C34279%2C1733475265079, suffix=, logDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,34279,1733475265079, archiveDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/oldWALs, maxLogs=32 2024-12-06T08:54:26,700 DEBUG [RS:1;25494438c68b:35585 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,35585,1733475265197/25494438c68b%2C35585%2C1733475265197.1733475266685, exclude list is [], retry=0 2024-12-06T08:54:26,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client 
skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43435,DS-0d93c57c-7eaa-47bc-b70c-4cf0a19ad55a,DISK] 2024-12-06T08:54:26,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40317,DS-757c0f5c-5227-42ef-84e9-f08391d8d4ac,DISK] 2024-12-06T08:54:26,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32979,DS-41cd615e-ddf3-441e-837f-3704a1617c48,DISK] 2024-12-06T08:54:26,714 INFO [RS:1;25494438c68b:35585 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,35585,1733475265197/25494438c68b%2C35585%2C1733475265197.1733475266685 2024-12-06T08:54:26,714 DEBUG [RS:1;25494438c68b:35585 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37687:37687),(127.0.0.1/127.0.0.1:40153:40153),(127.0.0.1/127.0.0.1:43805:43805)] 2024-12-06T08:54:26,736 DEBUG [RS:2;25494438c68b:37829 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,37829,1733475265242/25494438c68b%2C37829%2C1733475265242.1733475266686, exclude list is [], retry=0 2024-12-06T08:54:26,736 DEBUG [RS:0;25494438c68b:34279 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,34279,1733475265079/25494438c68b%2C34279%2C1733475265079.1733475266686, exclude list is [], retry=0 2024-12-06T08:54:26,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43435,DS-0d93c57c-7eaa-47bc-b70c-4cf0a19ad55a,DISK] 2024-12-06T08:54:26,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40317,DS-757c0f5c-5227-42ef-84e9-f08391d8d4ac,DISK] 2024-12-06T08:54:26,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32979,DS-41cd615e-ddf3-441e-837f-3704a1617c48,DISK] 2024-12-06T08:54:26,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32979,DS-41cd615e-ddf3-441e-837f-3704a1617c48,DISK] 2024-12-06T08:54:26,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:43435,DS-0d93c57c-7eaa-47bc-b70c-4cf0a19ad55a,DISK] 2024-12-06T08:54:26,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40317,DS-757c0f5c-5227-42ef-84e9-f08391d8d4ac,DISK] 2024-12-06T08:54:26,754 INFO [RS:0;25494438c68b:34279 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,34279,1733475265079/25494438c68b%2C34279%2C1733475265079.1733475266686 2024-12-06T08:54:26,754 INFO [RS:2;25494438c68b:37829 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,37829,1733475265242/25494438c68b%2C37829%2C1733475265242.1733475266686 2024-12-06T08:54:26,755 DEBUG [RS:0;25494438c68b:34279 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37687:37687),(127.0.0.1/127.0.0.1:43805:43805),(127.0.0.1/127.0.0.1:40153:40153)] 2024-12-06T08:54:26,755 DEBUG [RS:2;25494438c68b:37829 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40153:40153),(127.0.0.1/127.0.0.1:43805:43805),(127.0.0.1/127.0.0.1:37687:37687)] 2024-12-06T08:54:26,833 DEBUG [25494438c68b:45419 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-06T08:54:26,843 DEBUG [25494438c68b:45419 {}] balancer.BalancerClusterState(204): Hosts are {25494438c68b=0} racks are {/default-rack=0} 2024-12-06T08:54:26,851 DEBUG [25494438c68b:45419 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-06T08:54:26,851 DEBUG [25494438c68b:45419 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-06T08:54:26,851 DEBUG [25494438c68b:45419 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-06T08:54:26,851 DEBUG [25494438c68b:45419 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-06T08:54:26,851 DEBUG [25494438c68b:45419 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-06T08:54:26,851 DEBUG [25494438c68b:45419 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-06T08:54:26,851 INFO [25494438c68b:45419 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-06T08:54:26,851 INFO [25494438c68b:45419 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-06T08:54:26,851 INFO [25494438c68b:45419 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-06T08:54:26,851 DEBUG [25494438c68b:45419 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-06T08:54:26,859 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=25494438c68b,35585,1733475265197 2024-12-06T08:54:26,866 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 25494438c68b,35585,1733475265197, state=OPENING 2024-12-06T08:54:26,872 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T08:54:26,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase 2024-12-06T08:54:26,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,874 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:26,875 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:26,875 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:26,875 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:26,875 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:26,877 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T08:54:26,879 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=25494438c68b,35585,1733475265197}] 2024-12-06T08:54:27,056 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T08:54:27,059 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50281, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T08:54:27,071 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-06T08:54:27,071 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-06T08:54:27,072 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-06T08:54:27,075 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=25494438c68b%2C35585%2C1733475265197.meta, suffix=.meta, logDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,35585,1733475265197, archiveDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/oldWALs, maxLogs=32 2024-12-06T08:54:27,092 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,35585,1733475265197/25494438c68b%2C35585%2C1733475265197.meta.1733475267077.meta, exclude list is [], retry=0 2024-12-06T08:54:27,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32979,DS-41cd615e-ddf3-441e-837f-3704a1617c48,DISK] 2024-12-06T08:54:27,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43435,DS-0d93c57c-7eaa-47bc-b70c-4cf0a19ad55a,DISK] 2024-12-06T08:54:27,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40317,DS-757c0f5c-5227-42ef-84e9-f08391d8d4ac,DISK] 2024-12-06T08:54:27,101 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/WALs/25494438c68b,35585,1733475265197/25494438c68b%2C35585%2C1733475265197.meta.1733475267077.meta 2024-12-06T08:54:27,101 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43805:43805),(127.0.0.1/127.0.0.1:40153:40153),(127.0.0.1/127.0.0.1:37687:37687)] 2024-12-06T08:54:27,102 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:54:27,104 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T08:54:27,108 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T08:54:27,113 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-06T08:54:27,118 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T08:54:27,119 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:54:27,119 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-06T08:54:27,119 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-06T08:54:27,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:54:27,124 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:54:27,124 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:27,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:27,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T08:54:27,127 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T08:54:27,128 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:27,129 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:27,129 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:54:27,130 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:54:27,131 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:27,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:27,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:54:27,133 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:54:27,133 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:27,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-06T08:54:27,134 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T08:54:27,136 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740 2024-12-06T08:54:27,138 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740 2024-12-06T08:54:27,141 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T08:54:27,141 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T08:54:27,142 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T08:54:27,145 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T08:54:27,147 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60910833, jitterRate=-0.09235785901546478}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T08:54:27,147 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-06T08:54:27,149 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733475267120Writing region info on filesystem at 1733475267120Initializing all the Stores at 1733475267122 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475267122Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475267122Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733475267122Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475267122Cleaning up temporary data from old regions at 1733475267141 (+19 ms)Running coprocessor post-open hooks at 1733475267147 (+6 ms)Region opened successfully at 1733475267149 (+2 ms) 2024-12-06T08:54:27,159 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733475267047 2024-12-06T08:54:27,170 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T08:54:27,171 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-06T08:54:27,173 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=25494438c68b,35585,1733475265197 2024-12-06T08:54:27,175 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 25494438c68b,35585,1733475265197, state=OPEN 2024-12-06T08:54:27,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:54:27,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:54:27,178 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:54:27,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:54:27,178 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:27,178 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:27,178 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:27,178 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:27,178 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=25494438c68b,35585,1733475265197 2024-12-06T08:54:27,184 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T08:54:27,184 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=25494438c68b,35585,1733475265197 in 300 msec 2024-12-06T08:54:27,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T08:54:27,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 771 msec 2024-12-06T08:54:27,193 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:54:27,193 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-06T08:54:27,217 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T08:54:27,218 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=25494438c68b,35585,1733475265197, seqNum=-1] 2024-12-06T08:54:27,239 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:54:27,242 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34429, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:54:27,295 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0910 sec 2024-12-06T08:54:27,296 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733475267295, completionTime=-1 2024-12-06T08:54:27,299 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-06T08:54:27,299 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-06T08:54:27,335 INFO [master/25494438c68b:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-06T08:54:27,336 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733475327336 2024-12-06T08:54:27,336 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733475387336 2024-12-06T08:54:27,336 INFO [master/25494438c68b:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 36 msec 2024-12-06T08:54:27,338 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-06T08:54:27,346 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,45419,1733475264356-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:27,346 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,45419,1733475264356-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:27,346 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,45419,1733475264356-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:27,348 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-25494438c68b:45419, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:27,349 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:27,349 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:27,357 DEBUG [master/25494438c68b:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-06T08:54:27,380 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.045sec 2024-12-06T08:54:27,381 INFO [master/25494438c68b:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T08:54:27,383 INFO [master/25494438c68b:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T08:54:27,384 INFO [master/25494438c68b:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T08:54:27,384 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-06T08:54:27,384 INFO [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T08:54:27,385 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,45419,1733475264356-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:54:27,386 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,45419,1733475264356-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T08:54:27,392 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-06T08:54:27,393 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T08:54:27,393 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,45419,1733475264356-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:27,492 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25e0c3f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:54:27,497 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-06T08:54:27,497 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-06T08:54:27,501 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 25494438c68b,45419,-1 for getting cluster id 2024-12-06T08:54:27,504 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-06T08:54:27,513 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6f6c7ab0-c897-44d6-81df-33df81a36918' 2024-12-06T08:54:27,515 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-06T08:54:27,515 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6f6c7ab0-c897-44d6-81df-33df81a36918" 2024-12-06T08:54:27,515 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c91a50c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:54:27,516 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [25494438c68b,45419,-1] 2024-12-06T08:54:27,518 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-06T08:54:27,519 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:27,521 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-06T08:54:27,523 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f28ba97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:54:27,524 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T08:54:27,531 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=25494438c68b,35585,1733475265197, seqNum=-1] 2024-12-06T08:54:27,531 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:54:27,533 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55014, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:54:27,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=25494438c68b,45419,1733475264356 2024-12-06T08:54:27,558 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-06T08:54:27,563 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 25494438c68b,45419,1733475264356 2024-12-06T08:54:27,567 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@234f292f 2024-12-06T08:54:27,568 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:54:27,570 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58114, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:54:27,576 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:54:27,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-06T08:54:27,586 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:54:27,588 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-06T08:54:27,589 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:27,592 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:54:27,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T08:54:27,602 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:27,602 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:27,606 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-502423260_22 at /127.0.0.1:45222 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:32979:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45222 dst: /127.0.0.1:32979 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:27,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-06T08:54:27,614 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-06T08:54:27,617 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 65bcf030eb8a5f0d199676d28c705213, NAME => 'TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9 2024-12-06T08:54:27,624 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:27,624 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:27,627 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-502423260_22 at /127.0.0.1:45236 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:32979:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45236 dst: /127.0.0.1:32979 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:27,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-06T08:54:27,632 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-06T08:54:27,632 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:54:27,633 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 65bcf030eb8a5f0d199676d28c705213, disabling compactions & flushes 2024-12-06T08:54:27,633 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 2024-12-06T08:54:27,633 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 2024-12-06T08:54:27,633 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. after waiting 0 ms 2024-12-06T08:54:27,633 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 2024-12-06T08:54:27,633 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 2024-12-06T08:54:27,633 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 65bcf030eb8a5f0d199676d28c705213: Waiting for close lock at 1733475267633Disabling compacts and flushes for region at 1733475267633Disabling writes for close at 1733475267633Writing region close event to WAL at 1733475267633Closed at 1733475267633 2024-12-06T08:54:27,636 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:54:27,641 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733475267636"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733475267636"}]},"ts":"1733475267636"} 2024-12-06T08:54:27,647 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
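The CreateTableProcedure above (pid=4) builds TestHBaseWalOnEC from the descriptor logged by HRegion(7572): a single column family 'cf' with VERSIONS => '1', no compression or encoding, and a 64 KB block size. A hedged client-side sketch of an equivalent createTable call; the table, family and attribute values come from the log, while the class name and connection boilerplate are assumptions rather than the test's actual source:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder
                      .newBuilder(Bytes.toBytes("cf"))
                      .setMaxVersions(1)          // VERSIONS => '1'
                      .setBlocksize(64 * 1024)    // BLOCKSIZE => 64 KB
                      .build());
          // The master runs this request as a CreateTableProcedure (pid=4 in this log).
          admin.createTable(table.build());
        }
      }
    }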
2024-12-06T08:54:27,650 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:54:27,653 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733475267650"}]},"ts":"1733475267650"} 2024-12-06T08:54:27,658 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-06T08:54:27,658 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {25494438c68b=0} racks are {/default-rack=0} 2024-12-06T08:54:27,660 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-06T08:54:27,660 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-06T08:54:27,660 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-06T08:54:27,660 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-06T08:54:27,660 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-06T08:54:27,660 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-06T08:54:27,660 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-06T08:54:27,660 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-06T08:54:27,660 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-06T08:54:27,660 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-06T08:54:27,661 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=65bcf030eb8a5f0d199676d28c705213, ASSIGN}] 2024-12-06T08:54:27,664 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=65bcf030eb8a5f0d199676d28c705213, ASSIGN 2024-12-06T08:54:27,667 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=65bcf030eb8a5f0d199676d28c705213, ASSIGN; state=OFFLINE, location=25494438c68b,34279,1733475265079; forceNewPlan=false, retain=false 2024-12-06T08:54:27,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T08:54:27,819 INFO [25494438c68b:45419 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
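The ASSIGN subprocedure initialized here (pid=5) is what the test later blocks on; the "Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms" lines further down are emitted by HBaseTestingUtil. A small sketch of that wait, assuming the waitUntilAllRegionsAssigned signature carried over unchanged from HBaseTestingUtility and that util is the instance which started this mini cluster:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    final class WaitForAssignment {
      // util is assumed to be the HBaseTestingUtil that started the mini cluster.
      static void waitForTestTable(HBaseTestingUtil util) throws Exception {
        // Produces the "Waiting until all regions ... get assigned" log lines below.
        util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"), 60_000);
      }
    }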
2024-12-06T08:54:27,820 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=65bcf030eb8a5f0d199676d28c705213, regionState=OPENING, regionLocation=25494438c68b,34279,1733475265079 2024-12-06T08:54:27,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=65bcf030eb8a5f0d199676d28c705213, ASSIGN because future has completed 2024-12-06T08:54:27,825 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 65bcf030eb8a5f0d199676d28c705213, server=25494438c68b,34279,1733475265079}] 2024-12-06T08:54:27,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T08:54:27,980 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T08:54:27,982 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45323, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T08:54:27,988 INFO [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 2024-12-06T08:54:27,989 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 65bcf030eb8a5f0d199676d28c705213, NAME => 'TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:54:27,989 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 65bcf030eb8a5f0d199676d28c705213 2024-12-06T08:54:27,989 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:54:27,989 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 65bcf030eb8a5f0d199676d28c705213 2024-12-06T08:54:27,989 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 65bcf030eb8a5f0d199676d28c705213 2024-12-06T08:54:27,992 INFO [StoreOpener-65bcf030eb8a5f0d199676d28c705213-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 65bcf030eb8a5f0d199676d28c705213 2024-12-06T08:54:27,994 INFO [StoreOpener-65bcf030eb8a5f0d199676d28c705213-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 65bcf030eb8a5f0d199676d28c705213 columnFamilyName cf 2024-12-06T08:54:27,994 DEBUG [StoreOpener-65bcf030eb8a5f0d199676d28c705213-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:27,995 INFO [StoreOpener-65bcf030eb8a5f0d199676d28c705213-1 {}] regionserver.HStore(327): Store=65bcf030eb8a5f0d199676d28c705213/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:54:27,995 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 65bcf030eb8a5f0d199676d28c705213 2024-12-06T08:54:27,996 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/default/TestHBaseWalOnEC/65bcf030eb8a5f0d199676d28c705213 2024-12-06T08:54:27,997 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/default/TestHBaseWalOnEC/65bcf030eb8a5f0d199676d28c705213 2024-12-06T08:54:27,998 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 65bcf030eb8a5f0d199676d28c705213 2024-12-06T08:54:27,998 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 65bcf030eb8a5f0d199676d28c705213 2024-12-06T08:54:28,000 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 65bcf030eb8a5f0d199676d28c705213 2024-12-06T08:54:28,007 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/default/TestHBaseWalOnEC/65bcf030eb8a5f0d199676d28c705213/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:54:28,008 INFO [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 65bcf030eb8a5f0d199676d28c705213; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71817005, jitterRate=0.0701567679643631}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:54:28,008 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 65bcf030eb8a5f0d199676d28c705213 2024-12-06T08:54:28,009 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 65bcf030eb8a5f0d199676d28c705213: Running coprocessor pre-open hook at 1733475267989Writing region info on filesystem at 1733475267989Initializing all the Stores at 1733475267991 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733475267991Cleaning up temporary data from old regions at 1733475267998 (+7 ms)Running coprocessor post-open hooks at 1733475268008 (+10 ms)Region opened successfully at 1733475268009 (+1 ms) 2024-12-06T08:54:28,011 INFO [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213., pid=6, masterSystemTime=1733475267979 2024-12-06T08:54:28,014 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 2024-12-06T08:54:28,014 INFO [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 2024-12-06T08:54:28,016 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=65bcf030eb8a5f0d199676d28c705213, regionState=OPEN, openSeqNum=2, regionLocation=25494438c68b,34279,1733475265079 2024-12-06T08:54:28,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 65bcf030eb8a5f0d199676d28c705213, server=25494438c68b,34279,1733475265079 because future has completed 2024-12-06T08:54:28,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T08:54:28,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 65bcf030eb8a5f0d199676d28c705213, server=25494438c68b,34279,1733475265079 in 196 msec 2024-12-06T08:54:28,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T08:54:28,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=65bcf030eb8a5f0d199676d28c705213, ASSIGN in 364 msec 2024-12-06T08:54:28,031 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:54:28,031 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733475268031"}]},"ts":"1733475268031"} 2024-12-06T08:54:28,034 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-06T08:54:28,035 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:54:28,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 456 msec 2024-12-06T08:54:28,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T08:54:28,228 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-06T08:54:28,228 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-06T08:54:28,230 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T08:54:28,236 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-06T08:54:28,236 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T08:54:28,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-06T08:54:28,246 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213., hostname=25494438c68b,34279,1733475265079, seqNum=2] 2024-12-06T08:54:28,247 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:54:28,250 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52996, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:54:28,258 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-06T08:54:28,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-06T08:54:28,265 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:54:28,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T08:54:28,267 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:54:28,268 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:54:28,378 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T08:54:28,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34279 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-06T08:54:28,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 2024-12-06T08:54:28,438 INFO [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 65bcf030eb8a5f0d199676d28c705213 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-06T08:54:28,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/default/TestHBaseWalOnEC/65bcf030eb8a5f0d199676d28c705213/.tmp/cf/e01dd79376c24677a1bef17e7a733ce8 is 36, key is row/cf:cq/1733475268250/Put/seqid=0 2024-12-06T08:54:28,508 WARN [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:28,508 WARN [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:28,512 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1068333433_22 at /127.0.0.1:56420 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:40317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56420 dst: /127.0.0.1:40317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:28,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-06T08:54:28,524 WARN [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T08:54:28,524 INFO [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/default/TestHBaseWalOnEC/65bcf030eb8a5f0d199676d28c705213/.tmp/cf/e01dd79376c24677a1bef17e7a733ce8 2024-12-06T08:54:28,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/default/TestHBaseWalOnEC/65bcf030eb8a5f0d199676d28c705213/.tmp/cf/e01dd79376c24677a1bef17e7a733ce8 as hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/default/TestHBaseWalOnEC/65bcf030eb8a5f0d199676d28c705213/cf/e01dd79376c24677a1bef17e7a733ce8 2024-12-06T08:54:28,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T08:54:28,592 INFO [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/default/TestHBaseWalOnEC/65bcf030eb8a5f0d199676d28c705213/cf/e01dd79376c24677a1bef17e7a733ce8, entries=1, sequenceid=5, filesize=4.7 K 2024-12-06T08:54:28,602 INFO [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 65bcf030eb8a5f0d199676d28c705213 in 160ms, sequenceid=5, compaction requested=false 2024-12-06T08:54:28,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-06T08:54:28,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 65bcf030eb8a5f0d199676d28c705213: 2024-12-06T08:54:28,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 
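The flush above (pid=7/8) moves a single 32 B cell, keyed row/cf:cq, out of the memstore into the store file .../cf/e01dd79376c24677a1bef17e7a733ce8 and commits it. A hedged sketch of the client calls behind this sequence; the table, family, qualifier and row names come from the log, while the connection setup and the cell value are assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class PutAndFlush {
      static void run(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(tn); Admin admin = conn.getAdmin()) {
          // One cell: row "row", family "cf", qualifier "cq" -- the key seen in the
          // HFileWriterImpl line above; the value literal is a placeholder.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Triggers the master-side FlushTableProcedure (pid=7/8 above), moving the
          // memstore contents into a store file under .../cf/.
          admin.flush(tn);
        }
      }
    }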
2024-12-06T08:54:28,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-06T08:54:28,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-06T08:54:28,617 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-06T08:54:28,617 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 345 msec 2024-12-06T08:54:28,624 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 362 msec 2024-12-06T08:54:28,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T08:54:28,899 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-06T08:54:28,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-06T08:54:28,913 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-06T08:54:28,914 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:54:28,918 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:28,919 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:28,919 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-06T08:54:28,919 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T08:54:28,919 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1904156029, stopped=false 2024-12-06T08:54:28,920 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=25494438c68b,45419,1733475264356 2024-12-06T08:54:28,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:28,922 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:28,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:28,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:28,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:28,922 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, 
quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:28,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:28,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:28,923 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T08:54:28,923 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-06T08:54:28,923 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:28,923 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:54:28,923 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:28,924 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '25494438c68b,34279,1733475265079' ***** 2024-12-06T08:54:28,924 INFO [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(2196): ***** STOPPING region server '25494438c68b,37829,1733475265242' ***** 2024-12-06T08:54:28,924 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T08:54:28,924 INFO [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(2210): STOPPED: Exiting; cluster shutdown set and not carrying any regions 2024-12-06T08:54:28,924 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '25494438c68b,35585,1733475265197' ***** 2024-12-06T08:54:28,924 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T08:54:28,924 INFO [RS:0;25494438c68b:34279 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:54:28,924 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T08:54:28,924 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T08:54:28,925 INFO [RS:0;25494438c68b:34279 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:54:28,925 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:28,925 INFO [RS:1;25494438c68b:35585 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:54:28,925 INFO [RS:0;25494438c68b:34279 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
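Everything from "Shutting down minicluster" onward is the test's teardown: the call stacks show TestHBaseWalOnEC.tearDown(101) reaching HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection, asks the master to shut down (deleting /hbase/running in ZooKeeper), and then stops the three region servers. A rough sketch of that teardown shape; only the shutdownMiniCluster call is grounded in the trace, while the annotation choice and field name are assumptions:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class TearDownSketch {
      // Stands in for whichever HBaseTestingUtil instance started the mini cluster.
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the shared async connection, requests master shutdown (removing
        // /hbase/running in ZooKeeper), then stops the region servers -- the
        // sequence logged here from 08:54:28,913 onward.
        UTIL.shutdownMiniCluster();
      }
    }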
2024-12-06T08:54:28,925 INFO [regionserver/25494438c68b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:54:28,925 INFO [regionserver/25494438c68b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:54:28,926 INFO [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(3091): Received CLOSE for 65bcf030eb8a5f0d199676d28c705213 2024-12-06T08:54:28,926 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T08:54:28,926 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:28,926 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:28,927 INFO [RS:2;25494438c68b:37829 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:54:28,927 INFO [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(959): stopping server 25494438c68b,34279,1733475265079 2024-12-06T08:54:28,927 INFO [RS:2;25494438c68b:37829 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:54:28,927 INFO [RS:0;25494438c68b:34279 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T08:54:28,927 INFO [RS:1;25494438c68b:35585 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:54:28,927 INFO [RS:2;25494438c68b:37829 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T08:54:28,927 INFO [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(959): stopping server 25494438c68b,37829,1733475265242 2024-12-06T08:54:28,927 INFO [RS:0;25494438c68b:34279 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;25494438c68b:34279. 2024-12-06T08:54:28,927 INFO [RS:1;25494438c68b:35585 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T08:54:28,927 INFO [RS:2;25494438c68b:37829 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T08:54:28,928 INFO [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(959): stopping server 25494438c68b,35585,1733475265197 2024-12-06T08:54:28,928 INFO [RS:2;25494438c68b:37829 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;25494438c68b:37829. 
2024-12-06T08:54:28,928 DEBUG [RS:0;25494438c68b:34279 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:54:28,928 INFO [RS:1;25494438c68b:35585 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T08:54:28,928 DEBUG [RS:0;25494438c68b:34279 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:28,928 DEBUG [RS:2;25494438c68b:37829 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:54:28,928 DEBUG [RS:2;25494438c68b:37829 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:28,928 INFO [RS:1;25494438c68b:35585 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;25494438c68b:35585. 
2024-12-06T08:54:28,928 DEBUG [RS:1;25494438c68b:35585 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:54:28,928 DEBUG [RS:1;25494438c68b:35585 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:28,928 INFO [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-06T08:54:28,928 INFO [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(976): stopping server 25494438c68b,37829,1733475265242; all regions closed. 2024-12-06T08:54:28,928 DEBUG [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(1325): Online Regions={65bcf030eb8a5f0d199676d28c705213=TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213.} 2024-12-06T08:54:28,928 DEBUG [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(1351): Waiting on 65bcf030eb8a5f0d199676d28c705213 2024-12-06T08:54:28,929 INFO [RS:1;25494438c68b:35585 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T08:54:28,929 INFO [RS:1;25494438c68b:35585 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T08:54:28,929 INFO [RS:1;25494438c68b:35585 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T08:54:28,929 INFO [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-06T08:54:28,930 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 65bcf030eb8a5f0d199676d28c705213, disabling compactions & flushes 2024-12-06T08:54:28,930 INFO [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 2024-12-06T08:54:28,930 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 
2024-12-06T08:54:28,930 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. after waiting 0 ms 2024-12-06T08:54:28,930 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 2024-12-06T08:54:28,931 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:54:28,931 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T08:54:28,931 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T08:54:28,931 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:54:28,931 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:54:28,932 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-06T08:54:28,930 INFO [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-06T08:54:28,933 DEBUG [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-06T08:54:28,933 DEBUG [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-06T08:54:28,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_1073741827_1017 (size=93) 2024-12-06T08:54:28,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741827_1017 (size=93) 2024-12-06T08:54:28,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741827_1017 (size=93) 2024-12-06T08:54:28,952 DEBUG [RS:2;25494438c68b:37829 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/oldWALs 2024-12-06T08:54:28,952 INFO [RS:2;25494438c68b:37829 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 25494438c68b%2C37829%2C1733475265242:(num 1733475266686) 2024-12-06T08:54:28,952 DEBUG [RS:2;25494438c68b:37829 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:28,953 INFO [RS:2;25494438c68b:37829 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:54:28,953 INFO [RS:2;25494438c68b:37829 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T08:54:28,953 INFO [RS:2;25494438c68b:37829 {}] hbase.ChoreService(370): Chore service for: regionserver/25494438c68b:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, 
unit=MILLISECONDS] on shutdown 2024-12-06T08:54:28,953 INFO [RS:2;25494438c68b:37829 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T08:54:28,953 INFO [RS:2;25494438c68b:37829 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T08:54:28,953 INFO [RS:2;25494438c68b:37829 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T08:54:28,953 INFO [regionserver/25494438c68b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T08:54:28,953 INFO [RS:2;25494438c68b:37829 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T08:54:28,954 INFO [RS:2;25494438c68b:37829 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37829 2024-12-06T08:54:28,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-06T08:54:28,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-06T08:54:28,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-06T08:54:28,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:54:28,960 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/25494438c68b,37829,1733475265242 2024-12-06T08:54:28,961 INFO [RS:2;25494438c68b:37829 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T08:54:28,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-06T08:54:28,962 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [25494438c68b,37829,1733475265242] 2024-12-06T08:54:28,965 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/25494438c68b,37829,1733475265242 already deleted, retry=false 2024-12-06T08:54:28,965 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 25494438c68b,37829,1733475265242 expired; onlineServers=2 2024-12-06T08:54:28,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-06T08:54:28,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-06T08:54:28,982 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/default/TestHBaseWalOnEC/65bcf030eb8a5f0d199676d28c705213/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-06T08:54:28,985 INFO [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 2024-12-06T08:54:28,986 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 65bcf030eb8a5f0d199676d28c705213: Waiting for close lock at 1733475268929Running coprocessor pre-close hooks at 1733475268930 (+1 ms)Disabling compacts and flushes for region at 1733475268930Disabling writes for close at 1733475268930Writing region close event to WAL at 1733475268956 (+26 ms)Running coprocessor post-close hooks at 1733475268983 (+27 ms)Closed at 1733475268985 (+2 ms) 2024-12-06T08:54:28,986 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213. 2024-12-06T08:54:28,992 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/.tmp/info/35e58a3cdd9a4dd9a5286f99ef59faca is 153, key is TestHBaseWalOnEC,,1733475267572.65bcf030eb8a5f0d199676d28c705213./info:regioninfo/1733475268015/Put/seqid=0 2024-12-06T08:54:28,995 WARN [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:28,995 WARN [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:29,001 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1559564206_22 at /127.0.0.1:56476 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:40317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56476 dst: /127.0.0.1:40317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:29,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-06T08:54:29,011 WARN [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T08:54:29,011 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/.tmp/info/35e58a3cdd9a4dd9a5286f99ef59faca 2024-12-06T08:54:29,025 INFO [regionserver/25494438c68b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:54:29,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-06T08:54:29,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-06T08:54:29,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-06T08:54:29,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-06T08:54:29,051 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/.tmp/ns/50a3dc3b94084582bb453e76ab24ef9e is 43, key is default/ns:d/1733475267269/Put/seqid=0 2024-12-06T08:54:29,054 WARN [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:29,054 WARN [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:29,061 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1559564206_22 at /127.0.0.1:45310 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:32979:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45310 dst: /127.0.0.1:32979 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:29,064 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:29,064 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37829-0x10066891d110003, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:29,064 INFO [RS:2;25494438c68b:37829 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T08:54:29,065 INFO [RS:2;25494438c68b:37829 {}] regionserver.HRegionServer(1031): Exiting; stopping=25494438c68b,37829,1733475265242; zookeeper connection closed. 2024-12-06T08:54:29,068 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2b6c3c04 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2b6c3c04 2024-12-06T08:54:29,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-06T08:54:29,074 WARN [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T08:54:29,074 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/.tmp/ns/50a3dc3b94084582bb453e76ab24ef9e 2024-12-06T08:54:29,102 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/.tmp/table/15697ea852fb4f8483e61ab35c1a2e3c is 52, key is TestHBaseWalOnEC/table:state/1733475268031/Put/seqid=0 2024-12-06T08:54:29,105 WARN [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:29,105 WARN [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:29,109 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1559564206_22 at /127.0.0.1:56510 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:40317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56510 dst: /127.0.0.1:40317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:29,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-06T08:54:29,117 WARN [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T08:54:29,118 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/.tmp/table/15697ea852fb4f8483e61ab35c1a2e3c 2024-12-06T08:54:29,129 INFO [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(976): stopping server 25494438c68b,34279,1733475265079; all regions closed. 
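The repeated "Cannot allocate parity block(index=3/4, policy=RS-3-2-1024k)" warnings above come down to counting: an RS-3-2 block group wants its 3 data blocks and 2 parity blocks on 5 distinct datanodes, but this minicluster runs only 3, so the two parity blocks (indices 3 and 4) are left without a node. A minimal standalone sketch of that arithmetic, with illustrative names (EcPlacementCheck, unplaceableBlocks) rather than any HDFS class:

/** Standalone illustration of why an RS(k,m) striped group cannot be fully
 *  placed when the cluster has fewer datanodes than k + m internal blocks. */
public class EcPlacementCheck {
    static int unplaceableBlocks(int dataUnits, int parityUnits, int liveDataNodes) {
        int required = dataUnits + parityUnits;        // one distinct node per internal block
        return Math.max(0, required - liveDataNodes);  // blocks left without a node
    }

    public static void main(String[] args) {
        int dataUnits = 3, parityUnits = 2, liveDataNodes = 3; // RS-3-2 on a 3-datanode minicluster
        int missing = unplaceableBlocks(dataUnits, parityUnits, liveDataNodes);
        // Prints 2: exactly the two parity blocks (indices 3 and 4) reported in the log.
        System.out.println("Internal blocks without a datanode: " + missing);
    }
}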
2024-12-06T08:54:29,130 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/.tmp/info/35e58a3cdd9a4dd9a5286f99ef59faca as hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/info/35e58a3cdd9a4dd9a5286f99ef59faca 2024-12-06T08:54:29,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741828_1018 (size=1298) 2024-12-06T08:54:29,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_1073741828_1018 (size=1298) 2024-12-06T08:54:29,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741828_1018 (size=1298) 2024-12-06T08:54:29,136 DEBUG [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-06T08:54:29,139 DEBUG [RS:0;25494438c68b:34279 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/oldWALs 2024-12-06T08:54:29,139 INFO [RS:0;25494438c68b:34279 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 25494438c68b%2C34279%2C1733475265079:(num 1733475266686) 2024-12-06T08:54:29,139 DEBUG [RS:0;25494438c68b:34279 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:29,139 INFO [RS:0;25494438c68b:34279 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:54:29,140 INFO [RS:0;25494438c68b:34279 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T08:54:29,140 INFO [RS:0;25494438c68b:34279 {}] hbase.ChoreService(370): Chore service for: regionserver/25494438c68b:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-06T08:54:29,141 INFO [RS:0;25494438c68b:34279 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T08:54:29,141 INFO [RS:0;25494438c68b:34279 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T08:54:29,141 INFO [RS:0;25494438c68b:34279 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T08:54:29,141 INFO [RS:0;25494438c68b:34279 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T08:54:29,141 INFO [regionserver/25494438c68b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-06T08:54:29,142 INFO [RS:0;25494438c68b:34279 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34279 2024-12-06T08:54:29,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/25494438c68b,34279,1733475265079 2024-12-06T08:54:29,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:54:29,145 INFO [RS:0;25494438c68b:34279 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T08:54:29,146 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/info/35e58a3cdd9a4dd9a5286f99ef59faca, entries=10, sequenceid=11, filesize=6.5 K 2024-12-06T08:54:29,146 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [25494438c68b,34279,1733475265079] 2024-12-06T08:54:29,148 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/.tmp/ns/50a3dc3b94084582bb453e76ab24ef9e as hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/ns/50a3dc3b94084582bb453e76ab24ef9e 2024-12-06T08:54:29,149 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/25494438c68b,34279,1733475265079 already deleted, retry=false 2024-12-06T08:54:29,149 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 25494438c68b,34279,1733475265079 expired; onlineServers=1 2024-12-06T08:54:29,159 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/ns/50a3dc3b94084582bb453e76ab24ef9e, entries=2, sequenceid=11, filesize=5.0 K 2024-12-06T08:54:29,160 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/.tmp/table/15697ea852fb4f8483e61ab35c1a2e3c as hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/table/15697ea852fb4f8483e61ab35c1a2e3c 2024-12-06T08:54:29,169 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/table/15697ea852fb4f8483e61ab35c1a2e3c, entries=2, sequenceid=11, filesize=5.1 K 2024-12-06T08:54:29,171 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 239ms, sequenceid=11, compaction requested=false 2024-12-06T08:54:29,171 DEBUG 
[RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T08:54:29,180 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-06T08:54:29,181 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T08:54:29,181 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T08:54:29,181 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733475268931Running coprocessor pre-close hooks at 1733475268931Disabling compacts and flushes for region at 1733475268931Disabling writes for close at 1733475268931Obtaining lock to block concurrent updates at 1733475268932 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733475268932Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733475268933 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733475268934 (+1 ms)Flushing 1588230740/info: creating writer at 1733475268935 (+1 ms)Flushing 1588230740/info: appending metadata at 1733475268988 (+53 ms)Flushing 1588230740/info: closing flushed file at 1733475268988Flushing 1588230740/ns: creating writer at 1733475269025 (+37 ms)Flushing 1588230740/ns: appending metadata at 1733475269050 (+25 ms)Flushing 1588230740/ns: closing flushed file at 1733475269050Flushing 1588230740/table: creating writer at 1733475269084 (+34 ms)Flushing 1588230740/table: appending metadata at 1733475269101 (+17 ms)Flushing 1588230740/table: closing flushed file at 1733475269101Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fe64ab4: reopening flushed file at 1733475269129 (+28 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33937921: reopening flushed file at 1733475269146 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@313b4ac8: reopening flushed file at 1733475269159 (+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 239ms, sequenceid=11, compaction requested=false at 1733475269171 (+12 ms)Writing region close event to WAL at 1733475269173 (+2 ms)Running coprocessor post-close hooks at 1733475269181 (+8 ms)Closed at 1733475269181 2024-12-06T08:54:29,182 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T08:54:29,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:29,248 INFO [RS:0;25494438c68b:34279 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T08:54:29,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34279-0x10066891d110001, 
quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:29,249 INFO [RS:0;25494438c68b:34279 {}] regionserver.HRegionServer(1031): Exiting; stopping=25494438c68b,34279,1733475265079; zookeeper connection closed. 2024-12-06T08:54:29,249 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@33aed7eb {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@33aed7eb 2024-12-06T08:54:29,336 INFO [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(976): stopping server 25494438c68b,35585,1733475265197; all regions closed. 2024-12-06T08:54:29,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741829_1019 (size=2751) 2024-12-06T08:54:29,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741829_1019 (size=2751) 2024-12-06T08:54:29,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_1073741829_1019 (size=2751) 2024-12-06T08:54:29,344 DEBUG [RS:1;25494438c68b:35585 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/oldWALs 2024-12-06T08:54:29,344 INFO [RS:1;25494438c68b:35585 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 25494438c68b%2C35585%2C1733475265197.meta:.meta(num 1733475267077) 2024-12-06T08:54:29,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_1073741826_1016 (size=93) 2024-12-06T08:54:29,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741826_1016 (size=93) 2024-12-06T08:54:29,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741826_1016 (size=93) 2024-12-06T08:54:29,351 DEBUG [RS:1;25494438c68b:35585 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/oldWALs 2024-12-06T08:54:29,351 INFO [RS:1;25494438c68b:35585 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 25494438c68b%2C35585%2C1733475265197:(num 1733475266685) 2024-12-06T08:54:29,351 DEBUG [RS:1;25494438c68b:35585 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:29,351 INFO [RS:1;25494438c68b:35585 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:54:29,351 INFO [RS:1;25494438c68b:35585 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T08:54:29,351 INFO [RS:1;25494438c68b:35585 {}] hbase.ChoreService(370): Chore service for: regionserver/25494438c68b:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-06T08:54:29,351 INFO [RS:1;25494438c68b:35585 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T08:54:29,351 INFO [regionserver/25494438c68b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-06T08:54:29,351 INFO [RS:1;25494438c68b:35585 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35585 2024-12-06T08:54:29,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/25494438c68b,35585,1733475265197 2024-12-06T08:54:29,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:54:29,355 INFO [RS:1;25494438c68b:35585 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T08:54:29,357 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [25494438c68b,35585,1733475265197] 2024-12-06T08:54:29,358 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/25494438c68b,35585,1733475265197 already deleted, retry=false 2024-12-06T08:54:29,358 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 25494438c68b,35585,1733475265197 expired; onlineServers=0 2024-12-06T08:54:29,358 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '25494438c68b,45419,1733475264356' ***** 2024-12-06T08:54:29,359 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T08:54:29,359 INFO [M:0;25494438c68b:45419 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T08:54:29,359 INFO [M:0;25494438c68b:45419 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T08:54:29,359 DEBUG [M:0;25494438c68b:45419 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T08:54:29,359 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
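The RegionServerTracker and ZKWatcher lines above show how the master notices a region server's exit: the server's ephemeral child under /hbase/rs vanishes and the master receives NodeDeleted/NodeChildrenChanged events. Below is a minimal sketch of that watch-and-rewatch pattern with the plain ZooKeeper client; the connect string, session timeout, and class name are placeholders, and this is a simplified illustration rather than the HBase tracker code.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/** Minimal sketch: watch the children of an rs-style znode and react to departures. */
public class EphemeralChildWatcher implements Watcher {
    private final ZooKeeper zk;
    private final String parent;

    EphemeralChildWatcher(ZooKeeper zk, String parent) {
        this.zk = zk;
        this.parent = parent;
    }

    void start() throws Exception {
        // Registers this watcher; it fires once on the next change to the child list.
        List<String> children = zk.getChildren(parent, this);
        System.out.println("online servers: " + children);
    }

    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeChildrenChanged) {
            try {
                start(); // re-read and re-register, since ZooKeeper watches are one-shot
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, e -> { });
        new EphemeralChildWatcher(zk, "/hbase/rs").start();
        Thread.sleep(Long.MAX_VALUE); // keep the process alive to receive events
    }
}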
2024-12-06T08:54:29,359 DEBUG [M:0;25494438c68b:45419 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T08:54:29,359 DEBUG [master/25494438c68b:0:becomeActiveMaster-HFileCleaner.large.0-1733475266338 {}] cleaner.HFileCleaner(306): Exit Thread[master/25494438c68b:0:becomeActiveMaster-HFileCleaner.large.0-1733475266338,5,FailOnTimeoutGroup] 2024-12-06T08:54:29,359 DEBUG [master/25494438c68b:0:becomeActiveMaster-HFileCleaner.small.0-1733475266339 {}] cleaner.HFileCleaner(306): Exit Thread[master/25494438c68b:0:becomeActiveMaster-HFileCleaner.small.0-1733475266339,5,FailOnTimeoutGroup] 2024-12-06T08:54:29,359 INFO [M:0;25494438c68b:45419 {}] hbase.ChoreService(370): Chore service for: master/25494438c68b:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-06T08:54:29,360 INFO [M:0;25494438c68b:45419 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T08:54:29,360 DEBUG [M:0;25494438c68b:45419 {}] master.HMaster(1795): Stopping service threads 2024-12-06T08:54:29,360 INFO [M:0;25494438c68b:45419 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T08:54:29,360 INFO [M:0;25494438c68b:45419 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T08:54:29,361 INFO [M:0;25494438c68b:45419 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T08:54:29,361 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T08:54:29,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T08:54:29,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:29,362 DEBUG [M:0;25494438c68b:45419 {}] zookeeper.ZKUtil(347): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T08:54:29,362 WARN [M:0;25494438c68b:45419 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T08:54:29,363 INFO [M:0;25494438c68b:45419 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/.lastflushedseqids 2024-12-06T08:54:29,374 WARN [M:0;25494438c68b:45419 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:29,375 WARN [M:0;25494438c68b:45419 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-06T08:54:29,377 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-502423260_22 at /127.0.0.1:56522 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:40317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56522 dst: /127.0.0.1:40317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:29,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-06T08:54:29,382 WARN [M:0;25494438c68b:45419 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T08:54:29,382 INFO [M:0;25494438c68b:45419 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-06T08:54:29,382 INFO [M:0;25494438c68b:45419 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T08:54:29,382 DEBUG [M:0;25494438c68b:45419 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:54:29,382 INFO [M:0;25494438c68b:45419 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:29,382 DEBUG [M:0;25494438c68b:45419 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:29,382 DEBUG [M:0;25494438c68b:45419 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:54:29,382 DEBUG [M:0;25494438c68b:45419 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T08:54:29,383 INFO [M:0;25494438c68b:45419 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.81 KB heapSize=34.10 KB 2024-12-06T08:54:29,403 DEBUG [M:0;25494438c68b:45419 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8fa58fa436c34fbea58da949e9e8eed7 is 82, key is hbase:meta,,1/info:regioninfo/1733475267172/Put/seqid=0 2024-12-06T08:54:29,405 WARN [M:0;25494438c68b:45419 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:29,405 WARN [M:0;25494438c68b:45419 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:29,408 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-502423260_22 at /127.0.0.1:56530 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:40317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56530 dst: /127.0.0.1:40317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:29,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-06T08:54:29,412 WARN [M:0;25494438c68b:45419 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-06T08:54:29,413 INFO [M:0;25494438c68b:45419 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8fa58fa436c34fbea58da949e9e8eed7 2024-12-06T08:54:29,438 DEBUG [M:0;25494438c68b:45419 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/112160c194b347fca4f435374e1c0c0d is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733475268037/Put/seqid=0 2024-12-06T08:54:29,440 WARN [M:0;25494438c68b:45419 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:29,440 WARN [M:0;25494438c68b:45419 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:29,443 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-502423260_22 at /127.0.0.1:59436 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:43435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59436 dst: /127.0.0.1:43435 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:29,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_-9223372036854775552_1037 (size=6437) 2024-12-06T08:54:29,448 WARN [M:0;25494438c68b:45419 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-06T08:54:29,448 INFO [M:0;25494438c68b:45419 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.12 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/112160c194b347fca4f435374e1c0c0d 2024-12-06T08:54:29,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:29,457 INFO [RS:1;25494438c68b:35585 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T08:54:29,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35585-0x10066891d110002, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:29,457 INFO [RS:1;25494438c68b:35585 {}] regionserver.HRegionServer(1031): Exiting; stopping=25494438c68b,35585,1733475265197; zookeeper connection closed. 2024-12-06T08:54:29,457 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@17496dd {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@17496dd 2024-12-06T08:54:29,458 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-06T08:54:29,474 DEBUG [M:0;25494438c68b:45419 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87d32a091cf544a3911c5ae480589e38 is 69, key is 25494438c68b,34279,1733475265079/rs:state/1733475266444/Put/seqid=0 2024-12-06T08:54:29,476 WARN [M:0;25494438c68b:45419 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:29,476 WARN [M:0;25494438c68b:45419 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-06T08:54:29,480 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-502423260_22 at /127.0.0.1:56546 [Receiving block BP-291478311-172.17.0.2-1733475260908:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:40317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56546 dst: /127.0.0.1:40317 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:54:29,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-06T08:54:29,485 WARN [M:0;25494438c68b:45419 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-06T08:54:29,485 INFO [M:0;25494438c68b:45419 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87d32a091cf544a3911c5ae480589e38 2024-12-06T08:54:29,496 DEBUG [M:0;25494438c68b:45419 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8fa58fa436c34fbea58da949e9e8eed7 as hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8fa58fa436c34fbea58da949e9e8eed7 2024-12-06T08:54:29,505 INFO [M:0;25494438c68b:45419 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8fa58fa436c34fbea58da949e9e8eed7, entries=8, sequenceid=72, filesize=5.5 K 2024-12-06T08:54:29,507 DEBUG [M:0;25494438c68b:45419 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/112160c194b347fca4f435374e1c0c0d as hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/112160c194b347fca4f435374e1c0c0d 2024-12-06T08:54:29,515 INFO [M:0;25494438c68b:45419 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/112160c194b347fca4f435374e1c0c0d, entries=8, sequenceid=72, filesize=6.3 K 2024-12-06T08:54:29,517 DEBUG [M:0;25494438c68b:45419 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87d32a091cf544a3911c5ae480589e38 as hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/87d32a091cf544a3911c5ae480589e38 2024-12-06T08:54:29,525 INFO [M:0;25494438c68b:45419 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/87d32a091cf544a3911c5ae480589e38, entries=3, sequenceid=72, filesize=5.2 K 2024-12-06T08:54:29,527 INFO [M:0;25494438c68b:45419 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=72, compaction requested=false 2024-12-06T08:54:29,528 INFO [M:0;25494438c68b:45419 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:29,528 DEBUG [M:0;25494438c68b:45419 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733475269382Disabling compacts and flushes for region at 1733475269382Disabling writes for close at 1733475269382Obtaining lock to block concurrent updates at 1733475269383 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733475269383Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27450, getHeapSize=34856, getOffHeapSize=0, getCellsCount=85 at 1733475269383Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733475269384 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733475269384Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733475269402 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733475269402Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733475269421 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733475269437 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733475269437Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733475269456 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733475269474 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733475269474Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c17735: reopening flushed file at 1733475269495 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a21af2c: reopening flushed file at 1733475269506 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65155dba: reopening flushed file at 1733475269515 (+9 ms)Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=72, compaction requested=false at 1733475269527 (+12 ms)Writing region close event to WAL at 1733475269528 (+1 ms)Closed at 1733475269528 2024-12-06T08:54:29,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40317 is added to blk_1073741825_1011 (size=32653) 2024-12-06T08:54:29,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741825_1011 (size=32653) 2024-12-06T08:54:29,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741825_1011 (size=32653) 2024-12-06T08:54:29,533 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
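Each flush above lands first in a .tmp file and is then "Committed" to its final column-family directory, so a reader never observes a partially written store file. The generic write-then-rename pattern, sketched here with local java.nio.file calls and made-up file names instead of the HDFS paths in the log:

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

/** Sketch of the write-to-temp-then-rename pattern used when committing a flushed file. */
public class TmpThenCommit {
    public static void main(String[] args) throws Exception {
        Path dir = Files.createTempDirectory("store");
        Path tmp = dir.resolve(".tmp-8fa58fa4");        // staging file, not visible to readers
        Path committed = dir.resolve("8fa58fa4");       // final, published name

        Files.write(tmp, "flushed cells".getBytes());   // write the whole file off to the side
        // Publish in one rename: readers see either no file or the complete file, never a partial one.
        Files.move(tmp, committed, StandardCopyOption.ATOMIC_MOVE);

        System.out.println("committed " + committed + " (" + Files.size(committed) + " bytes)");
    }
}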
2024-12-06T08:54:29,533 INFO [M:0;25494438c68b:45419 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-06T08:54:29,533 INFO [M:0;25494438c68b:45419 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45419 2024-12-06T08:54:29,534 INFO [M:0;25494438c68b:45419 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T08:54:29,636 INFO [M:0;25494438c68b:45419 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T08:54:29,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:29,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45419-0x10066891d110000, quorum=127.0.0.1:51517, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:29,642 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2a9ecb50{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:54:29,646 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4067fd8a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:54:29,646 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:54:29,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@146c020c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:54:29,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@233bb3ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/hadoop.log.dir/,STOPPED} 2024-12-06T08:54:29,650 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:54:29,650 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:54:29,651 WARN [BP-291478311-172.17.0.2-1733475260908 heartbeating to localhost/127.0.0.1:37087 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:54:29,651 WARN [BP-291478311-172.17.0.2-1733475260908 heartbeating to localhost/127.0.0.1:37087 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-291478311-172.17.0.2-1733475260908 (Datanode Uuid f60d1d2b-cb74-4250-94d7-087a9597ca1b) service to localhost/127.0.0.1:37087 2024-12-06T08:54:29,653 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/data/data5/current/BP-291478311-172.17.0.2-1733475260908 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:54:29,653 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/data/data6/current/BP-291478311-172.17.0.2-1733475260908 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:54:29,654 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:54:29,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14402056{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:54:29,657 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e1cb3ec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:54:29,657 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:54:29,657 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65cd6e19{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:54:29,657 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5435fd88{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/hadoop.log.dir/,STOPPED} 2024-12-06T08:54:29,659 WARN [BP-291478311-172.17.0.2-1733475260908 heartbeating to localhost/127.0.0.1:37087 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:54:29,659 WARN [BP-291478311-172.17.0.2-1733475260908 heartbeating to localhost/127.0.0.1:37087 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-291478311-172.17.0.2-1733475260908 (Datanode Uuid 33a579dd-2608-4ac0-bdc6-1d7cd3f53e32) service to localhost/127.0.0.1:37087 2024-12-06T08:54:29,660 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:54:29,660 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:54:29,660 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/data/data3/current/BP-291478311-172.17.0.2-1733475260908 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:54:29,661 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/data/data4/current/BP-291478311-172.17.0.2-1733475260908 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:54:29,661 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:54:29,666 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b23cf15{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:54:29,667 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f9e5902{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:54:29,667 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:54:29,667 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b3a0659{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:54:29,667 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3665148e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/hadoop.log.dir/,STOPPED} 2024-12-06T08:54:29,669 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
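The "sleep interrupted" and "Command processor encountered interrupt and exit" lines are background helper threads treating interruption as their stop signal during datanode shutdown. A generic sketch of such a refresh loop follows (illustrative class RefreshLoop, not the Hadoop implementation):

/** Sketch of a periodic refresh thread that uses interruption as its stop signal. */
public class RefreshLoop implements Runnable {
    @Override
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            refresh();                       // e.g. re-measure disk usage
            try {
                Thread.sleep(10_000);        // wait for the next refresh interval
            } catch (InterruptedException e) {
                // Shutdown requested: log, restore the interrupt flag, and fall out of the loop.
                System.out.println("Interrupted waiting to refresh: " + e.getMessage());
                Thread.currentThread().interrupt();
            }
        }
    }

    private void refresh() { /* measurement elided */ }

    public static void main(String[] args) throws InterruptedException {
        Thread t = new Thread(new RefreshLoop(), "refreshUsed");
        t.start();
        Thread.sleep(100);
        t.interrupt();                       // what shutdown does to its helper threads
        t.join();
    }
}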
2024-12-06T08:54:29,669 WARN [BP-291478311-172.17.0.2-1733475260908 heartbeating to localhost/127.0.0.1:37087 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:54:29,669 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:54:29,669 WARN [BP-291478311-172.17.0.2-1733475260908 heartbeating to localhost/127.0.0.1:37087 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-291478311-172.17.0.2-1733475260908 (Datanode Uuid 0f1873dd-1d16-4804-87d4-af54c97f4833) service to localhost/127.0.0.1:37087 2024-12-06T08:54:29,669 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/data/data1/current/BP-291478311-172.17.0.2-1733475260908 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:54:29,670 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/cluster_b85cb446-0f54-2bc9-dc8e-bbf053994405/data/data2/current/BP-291478311-172.17.0.2-1733475260908 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:54:29,670 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:54:29,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62d6efd9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:54:29,678 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:54:29,678 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:54:29,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:54:29,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/hadoop.log.dir/,STOPPED} 2024-12-06T08:54:29,689 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-06T08:54:29,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-06T08:54:29,729 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=91 (was 158), OpenFileDescriptor=443 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=215 (was 208) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=8782 (was 9078) 2024-12-06T08:54:29,737 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=91, OpenFileDescriptor=443, MaxFileDescriptor=1048576, SystemLoadAverage=215, ProcessCount=11, AvailableMemoryMB=8782 2024-12-06T08:54:29,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T08:54:29,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/hadoop.log.dir so I do NOT create it in target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171 2024-12-06T08:54:29,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ddbb958-65a4-ef8c-2a6d-24135ce9116d/hadoop.tmp.dir so I do NOT create it in target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171 2024-12-06T08:54:29,738 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1, deleteOnExit=true 2024-12-06T08:54:29,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-06T08:54:29,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/test.cache.data in system properties and HBase conf 2024-12-06T08:54:29,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T08:54:29,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/hadoop.log.dir in system properties and HBase conf 2024-12-06T08:54:29,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T08:54:29,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T08:54:29,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-06T08:54:29,739 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T08:54:29,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:54:29,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:54:29,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T08:54:29,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:54:29,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T08:54:29,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T08:54:29,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:54:29,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:54:29,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T08:54:29,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/nfs.dump.dir in system properties and HBase conf 2024-12-06T08:54:29,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/java.io.tmpdir in system properties and HBase conf 2024-12-06T08:54:29,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:54:29,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T08:54:29,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T08:54:29,836 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:54:29,842 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:54:29,843 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:54:29,843 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:54:29,843 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:54:29,844 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:54:29,845 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1921d73d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:54:29,845 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@117b7671{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:54:29,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d637fa1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/java.io.tmpdir/jetty-localhost-41345-hadoop-hdfs-3_4_1-tests_jar-_-any-11899688009578511461/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:54:29,962 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6684a7bb{HTTP/1.1, (http/1.1)}{localhost:41345} 2024-12-06T08:54:29,962 INFO [Time-limited test {}] server.Server(415): Started @11426ms 2024-12-06T08:54:30,057 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:54:30,061 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:54:30,063 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:54:30,063 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:54:30,063 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:54:30,064 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ef18cbe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:54:30,064 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bfb2a2f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:54:30,190 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1aa34083{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/java.io.tmpdir/jetty-localhost-43463-hadoop-hdfs-3_4_1-tests_jar-_-any-4219557060728832385/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:54:30,190 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@234ffd1d{HTTP/1.1, (http/1.1)}{localhost:43463} 2024-12-06T08:54:30,190 INFO [Time-limited test {}] server.Server(415): Started @11654ms 2024-12-06T08:54:30,192 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:54:30,232 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:54:30,237 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:54:30,240 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:54:30,240 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:54:30,240 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:54:30,240 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6de03e39{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:54:30,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54390a13{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:54:30,291 WARN [Thread-526 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/data/data1/current/BP-1653138612-172.17.0.2-1733475269774/current, will proceed with Du for space computation calculation, 2024-12-06T08:54:30,291 WARN [Thread-527 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/data/data2/current/BP-1653138612-172.17.0.2-1733475269774/current, will proceed with Du for space computation calculation, 2024-12-06T08:54:30,308 WARN [Thread-505 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T08:54:30,312 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc7b10a151247815 with lease ID 0x389795ab5a4b8035: Processing first storage report for DS-f516c65d-4e5c-4971-a871-310b0bbbb2a4 from datanode DatanodeRegistration(127.0.0.1:35811, datanodeUuid=c559ffaf-7d4b-4bb2-85de-a3eadac2f48f, infoPort=44507, infoSecurePort=0, ipcPort=34109, storageInfo=lv=-57;cid=testClusterID;nsid=626585373;c=1733475269774) 2024-12-06T08:54:30,312 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc7b10a151247815 with lease ID 0x389795ab5a4b8035: from storage DS-f516c65d-4e5c-4971-a871-310b0bbbb2a4 node DatanodeRegistration(127.0.0.1:35811, datanodeUuid=c559ffaf-7d4b-4bb2-85de-a3eadac2f48f, infoPort=44507, infoSecurePort=0, ipcPort=34109, storageInfo=lv=-57;cid=testClusterID;nsid=626585373;c=1733475269774), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:54:30,312 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc7b10a151247815 with lease ID 0x389795ab5a4b8035: Processing first storage report for DS-1d6c01fe-ff94-4ff1-9056-3b0de07deac8 from datanode DatanodeRegistration(127.0.0.1:35811, datanodeUuid=c559ffaf-7d4b-4bb2-85de-a3eadac2f48f, infoPort=44507, infoSecurePort=0, ipcPort=34109, storageInfo=lv=-57;cid=testClusterID;nsid=626585373;c=1733475269774) 2024-12-06T08:54:30,312 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc7b10a151247815 with lease ID 0x389795ab5a4b8035: from storage DS-1d6c01fe-ff94-4ff1-9056-3b0de07deac8 node DatanodeRegistration(127.0.0.1:35811, datanodeUuid=c559ffaf-7d4b-4bb2-85de-a3eadac2f48f, infoPort=44507, infoSecurePort=0, ipcPort=34109, storageInfo=lv=-57;cid=testClusterID;nsid=626585373;c=1733475269774), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:54:30,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65e525b7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/java.io.tmpdir/jetty-localhost-41983-hadoop-hdfs-3_4_1-tests_jar-_-any-7621376710046848658/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:54:30,359 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4bd07676{HTTP/1.1, (http/1.1)}{localhost:41983} 2024-12-06T08:54:30,360 INFO [Time-limited test {}] server.Server(415): Started @11824ms 2024-12-06T08:54:30,361 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:54:30,393 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:54:30,396 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:54:30,397 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:54:30,397 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:54:30,397 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:54:30,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@611fd8d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:54:30,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d6974a6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:54:30,472 WARN [Thread-561 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/data/data3/current/BP-1653138612-172.17.0.2-1733475269774/current, will proceed with Du for space computation calculation, 2024-12-06T08:54:30,472 WARN [Thread-562 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/data/data4/current/BP-1653138612-172.17.0.2-1733475269774/current, will proceed with Du for space computation calculation, 2024-12-06T08:54:30,497 WARN [Thread-541 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T08:54:30,500 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59c6da6cb826747c with lease ID 0x389795ab5a4b8036: Processing first storage report for DS-934aa2e7-a9c3-4960-8eb5-143435e83803 from datanode DatanodeRegistration(127.0.0.1:41047, datanodeUuid=719839d2-1818-4496-8441-cf87aedcce6a, infoPort=42041, infoSecurePort=0, ipcPort=33067, storageInfo=lv=-57;cid=testClusterID;nsid=626585373;c=1733475269774) 2024-12-06T08:54:30,500 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59c6da6cb826747c with lease ID 0x389795ab5a4b8036: from storage DS-934aa2e7-a9c3-4960-8eb5-143435e83803 node DatanodeRegistration(127.0.0.1:41047, datanodeUuid=719839d2-1818-4496-8441-cf87aedcce6a, infoPort=42041, infoSecurePort=0, ipcPort=33067, storageInfo=lv=-57;cid=testClusterID;nsid=626585373;c=1733475269774), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:54:30,500 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59c6da6cb826747c with lease ID 0x389795ab5a4b8036: Processing first storage report for DS-52474c33-bedd-4ca7-852a-f86ba2e12a89 from datanode DatanodeRegistration(127.0.0.1:41047, datanodeUuid=719839d2-1818-4496-8441-cf87aedcce6a, infoPort=42041, infoSecurePort=0, ipcPort=33067, storageInfo=lv=-57;cid=testClusterID;nsid=626585373;c=1733475269774) 2024-12-06T08:54:30,500 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59c6da6cb826747c with lease ID 0x389795ab5a4b8036: from storage DS-52474c33-bedd-4ca7-852a-f86ba2e12a89 node DatanodeRegistration(127.0.0.1:41047, datanodeUuid=719839d2-1818-4496-8441-cf87aedcce6a, infoPort=42041, infoSecurePort=0, ipcPort=33067, storageInfo=lv=-57;cid=testClusterID;nsid=626585373;c=1733475269774), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:54:30,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3939dfd7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/java.io.tmpdir/jetty-localhost-35161-hadoop-hdfs-3_4_1-tests_jar-_-any-12612489651454188926/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:54:30,519 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6419fd60{HTTP/1.1, (http/1.1)}{localhost:35161} 2024-12-06T08:54:30,519 INFO [Time-limited test {}] server.Server(415): Started @11983ms 2024-12-06T08:54:30,521 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
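Each "Started ServerConnector@...{HTTP/1.1, (http/1.1)}{localhost:port}" entry above is a test HTTP endpoint (NameNode or DataNode web UI) coming up on an ephemeral localhost port, which is why every datanode gets a different port and no two runs collide. The underlying Jetty 9 idea, reduced to a standalone sketch (the cluster actually goes through Hadoop's own HTTP server wrapper; names here are illustrative):

    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.server.ServerConnector;

    public class EphemeralPortServer {
        public static void main(String[] args) throws Exception {
            Server server = new Server();

            // Port 0 asks the OS for any free port, avoiding the kind of
            // "Address already in use" collision seen later in this log.
            ServerConnector connector = new ServerConnector(server);
            connector.setHost("localhost");
            connector.setPort(0);
            server.addConnector(connector);
            // The real test cluster also registers /logs, /static and webapp contexts here.

            server.start();
            // Only after start() is the real port known; the log prints it in the
            // "Started ServerConnector...{localhost:43463}" form.
            System.out.println("Listening on localhost:" + connector.getLocalPort());

            server.stop();
        }
    }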
2024-12-06T08:54:30,627 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/data/data5/current/BP-1653138612-172.17.0.2-1733475269774/current, will proceed with Du for space computation calculation, 2024-12-06T08:54:30,627 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/data/data6/current/BP-1653138612-172.17.0.2-1733475269774/current, will proceed with Du for space computation calculation, 2024-12-06T08:54:30,656 WARN [Thread-576 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:54:30,660 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd84c610f80621ed8 with lease ID 0x389795ab5a4b8037: Processing first storage report for DS-adfa58ee-8a1a-4e3b-8f4c-09d737c202a0 from datanode DatanodeRegistration(127.0.0.1:41243, datanodeUuid=6193c713-3c98-4ec7-9ea7-444dca86cfba, infoPort=36149, infoSecurePort=0, ipcPort=38673, storageInfo=lv=-57;cid=testClusterID;nsid=626585373;c=1733475269774) 2024-12-06T08:54:30,660 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd84c610f80621ed8 with lease ID 0x389795ab5a4b8037: from storage DS-adfa58ee-8a1a-4e3b-8f4c-09d737c202a0 node DatanodeRegistration(127.0.0.1:41243, datanodeUuid=6193c713-3c98-4ec7-9ea7-444dca86cfba, infoPort=36149, infoSecurePort=0, ipcPort=38673, storageInfo=lv=-57;cid=testClusterID;nsid=626585373;c=1733475269774), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:54:30,660 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd84c610f80621ed8 with lease ID 0x389795ab5a4b8037: Processing first storage report for DS-2388ece4-533d-4ed4-8ad1-bc62f9c501de from datanode DatanodeRegistration(127.0.0.1:41243, datanodeUuid=6193c713-3c98-4ec7-9ea7-444dca86cfba, infoPort=36149, infoSecurePort=0, ipcPort=38673, storageInfo=lv=-57;cid=testClusterID;nsid=626585373;c=1733475269774) 2024-12-06T08:54:30,660 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd84c610f80621ed8 with lease ID 0x389795ab5a4b8037: from storage DS-2388ece4-533d-4ed4-8ad1-bc62f9c501de node DatanodeRegistration(127.0.0.1:41243, datanodeUuid=6193c713-3c98-4ec7-9ea7-444dca86cfba, infoPort=36149, infoSecurePort=0, ipcPort=38673, storageInfo=lv=-57;cid=testClusterID;nsid=626585373;c=1733475269774), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:54:30,749 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171 2024-12-06T08:54:30,750 DEBUG [Time-limited test {}] zookeeper.MiniZooKeeperCluster(246): Failed binding ZK Server to client port: 59434 java.net.BindException: Address already in use at sun.nio.ch.Net.bind0(Native Method) ~[?:?] at sun.nio.ch.Net.bind(Net.java:555) ~[?:?] at sun.nio.ch.ServerSocketChannelImpl.netBind(ServerSocketChannelImpl.java:337) ~[?:?] 
    at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:294) ~[?:?]
    at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:89) ~[?:?]
    at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:81) ~[?:?]
    at org.apache.zookeeper.server.NIOServerCnxnFactory.configure(NIOServerCnxnFactory.java:662) ~[zookeeper-3.8.4.jar:3.8.4]
    at org.apache.zookeeper.server.ServerCnxnFactory.configure(ServerCnxnFactory.java:109) ~[zookeeper-3.8.4.jar:3.8.4]
    at org.apache.zookeeper.server.ServerCnxnFactory.configure(ServerCnxnFactory.java:105) ~[zookeeper-3.8.4.jar:3.8.4]
    at org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster.startup(MiniZooKeeperCluster.java:242) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseZKTestingUtil.startMiniZKCluster(HBaseZKTestingUtil.java:131) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT-tests.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseZKTestingUtil.startMiniZKCluster(HBaseZKTestingUtil.java:104) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT-tests.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.startMiniCluster(HBaseTestingUtil.java:826) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.startMiniCluster(HBaseTestingUtil.java:784) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.setUp(TestHBaseWalOnEC.java:96) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.invokeMethod(RunBefores.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:24) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.run(ParentRunner.java:413) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.Suite.runChild(Suite.java:128) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.Suite.runChild(Suite.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
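The trace above records MiniZooKeeperCluster failing to bind client port 59434 ("Address already in use"); the next entry shows it retrying and starting successfully on 59435. A plain-JDK sketch of that bind-and-retry idea (hypothetical helper, not the HBase implementation):

    import java.io.IOException;
    import java.net.BindException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public final class PortProbe {
        // Try ports starting at 'firstPort' until one binds, much like the
        // mini ZK cluster does when its preferred client port is already taken.
        public static ServerSocket bindFirstFree(int firstPort, int maxAttempts) throws IOException {
            int port = firstPort;
            for (int attempt = 0; attempt < maxAttempts; attempt++, port++) {
                ServerSocket socket = new ServerSocket();
                try {
                    socket.bind(new InetSocketAddress("localhost", port));
                    return socket; // caller owns and must close the socket
                } catch (BindException e) {
                    socket.close();
                    System.err.println("Failed binding to client port: " + port
                        + ", trying " + (port + 1));
                }
            }
            throw new IOException("No free port in range starting at " + firstPort);
        }

        public static void main(String[] args) throws IOException {
            try (ServerSocket s = bindFirstFree(59434, 10)) {
                System.out.println("Bound to port " + s.getLocalPort());
            }
        }
    }

This is why the DEBUG-level bind failure is harmless noise in the test log rather than an error: the caller expects to walk forward until a free port is found.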
2024-12-06T08:54:30,753 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/zookeeper_0, clientPort=59435, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T08:54:30,754 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59435 2024-12-06T08:54:30,754 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:30,756 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:30,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:54:30,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:54:30,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:54:30,771 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07 with version=8 2024-12-06T08:54:30,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37087/user/jenkins/test-data/8d9230e9-842c-a3d1-0f07-c120fc914ad9/hbase-staging 2024-12-06T08:54:30,774 INFO [Time-limited test {}] client.ConnectionUtils(128): master/25494438c68b:0 server-side Connection retries=45 2024-12-06T08:54:30,774 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:30,774 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:30,774 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:54:30,774 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:30,774 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:54:30,774 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-06T08:54:30,774 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:54:30,776 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43489 2024-12-06T08:54:30,777 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43489 connecting to ZooKeeper ensemble=127.0.0.1:59435 2024-12-06T08:54:30,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:434890x0, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:54:30,784 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43489-0x100668939470000 connected 2024-12-06T08:54:30,799 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:30,801 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:30,802 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:30,803 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07, hbase.cluster.distributed=false 2024-12-06T08:54:30,804 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:54:30,804 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43489 2024-12-06T08:54:30,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43489 2024-12-06T08:54:30,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43489 2024-12-06T08:54:30,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43489 2024-12-06T08:54:30,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43489 2024-12-06T08:54:30,822 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/25494438c68b:0 server-side Connection retries=45 2024-12-06T08:54:30,822 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:30,822 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:30,822 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:54:30,822 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:30,822 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:54:30,822 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:54:30,823 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:54:30,823 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38179 2024-12-06T08:54:30,825 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38179 connecting to ZooKeeper ensemble=127.0.0.1:59435 2024-12-06T08:54:30,826 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:30,827 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:30,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:381790x0, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:54:30,832 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38179-0x100668939470001 connected 2024-12-06T08:54:30,832 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:30,833 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T08:54:30,833 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:54:30,834 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:54:30,835 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:54:30,835 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38179 2024-12-06T08:54:30,836 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38179 2024-12-06T08:54:30,836 
DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38179 2024-12-06T08:54:30,836 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38179 2024-12-06T08:54:30,836 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38179 2024-12-06T08:54:30,852 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/25494438c68b:0 server-side Connection retries=45 2024-12-06T08:54:30,852 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:30,852 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:30,852 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:54:30,852 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:30,852 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:54:30,852 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:54:30,853 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:54:30,853 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36685 2024-12-06T08:54:30,855 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36685 connecting to ZooKeeper ensemble=127.0.0.1:59435 2024-12-06T08:54:30,855 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:30,857 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:30,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:366850x0, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:54:30,862 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:366850x0, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:30,862 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36685-0x100668939470002 connected 2024-12-06T08:54:30,862 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T08:54:30,863 DEBUG 
[Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:54:30,864 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:54:30,865 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:54:30,865 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36685 2024-12-06T08:54:30,865 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36685 2024-12-06T08:54:30,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36685 2024-12-06T08:54:30,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36685 2024-12-06T08:54:30,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36685 2024-12-06T08:54:30,882 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/25494438c68b:0 server-side Connection retries=45 2024-12-06T08:54:30,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:30,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:30,883 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:54:30,883 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:54:30,883 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:54:30,883 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:54:30,883 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:54:30,884 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32887 2024-12-06T08:54:30,885 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32887 connecting to ZooKeeper ensemble=127.0.0.1:59435 2024-12-06T08:54:30,886 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:30,887 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:30,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:328870x0, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:54:30,892 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32887-0x100668939470003 connected 2024-12-06T08:54:30,892 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:30,893 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T08:54:30,893 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:54:30,894 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:54:30,895 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:54:30,896 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32887 2024-12-06T08:54:30,896 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32887 2024-12-06T08:54:30,896 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32887 2024-12-06T08:54:30,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32887 2024-12-06T08:54:30,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32887 2024-12-06T08:54:30,908 DEBUG [M:0;25494438c68b:43489 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;25494438c68b:43489 2024-12-06T08:54:30,909 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/25494438c68b,43489,1733475270773 2024-12-06T08:54:30,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:30,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:30,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:30,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:30,912 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/25494438c68b,43489,1733475270773 2024-12-06T08:54:30,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:54:30,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:54:30,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:30,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:30,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:30,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:54:30,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:30,914 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:54:30,915 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/25494438c68b,43489,1733475270773 from backup master directory 2024-12-06T08:54:30,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:30,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/25494438c68b,43489,1733475270773 2024-12-06T08:54:30,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:30,917 WARN [master/25494438c68b:0:becomeActiveMaster {}] 
hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T08:54:30,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:30,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:54:30,917 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=25494438c68b,43489,1733475270773 2024-12-06T08:54:30,924 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/hbase.id] with ID: eec2b7e4-db99-45f8-8bf6-2ebc91ae0b3b 2024-12-06T08:54:30,924 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/.tmp/hbase.id 2024-12-06T08:54:30,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:54:30,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:54:30,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:54:30,935 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/.tmp/hbase.id]:[hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/hbase.id] 2024-12-06T08:54:30,953 INFO [master/25494438c68b:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:54:30,954 INFO [master/25494438c68b:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-06T08:54:30,956 INFO [master/25494438c68b:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
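The FSUtils lines above show the active master writing the cluster ID to a temporary file under .tmp and then moving it to hbase.id, so readers never observe a half-written file. Below is a minimal sketch of that same write-then-rename pattern using only the public Hadoop FileSystem API; the paths and the plain UUID payload are illustrative (the real hbase.id holds a serialized ClusterId), and this is not the internal FSUtils helper the log refers to.

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:45531"); // NameNode address seen in the log
        FileSystem fs = FileSystem.get(conf);

        Path rootDir = new Path("/user/jenkins/test-data/example"); // illustrative root dir
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");

        // 1. Write the id under .tmp so concurrent readers never see a partial file.
        //    A plain UUID string stands in for the serialized ClusterId written by the master.
        String clusterId = UUID.randomUUID().toString();
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }

        // 2. Move it to its final location; HDFS rename within a directory tree is atomic
        //    enough that the target either exists fully written or not at all.
        if (!fs.rename(tmp, target)) {
          throw new IllegalStateException("rename of " + tmp + " to " + target + " failed");
        }
        System.out.println("cluster id file at " + target + " = " + clusterId);
      }
    }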
2024-12-06T08:54:30,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:30,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:30,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:30,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:30,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:54:30,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:54:30,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:54:30,975 INFO [master/25494438c68b:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:54:30,976 INFO [master/25494438c68b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T08:54:30,977 INFO [master/25494438c68b:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:54:30,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is 
added to blk_1073741828_1004 (size=1189) 2024-12-06T08:54:30,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:54:30,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:54:30,992 INFO [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store 2024-12-06T08:54:31,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:54:31,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:54:31,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:54:31,003 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:54:31,003 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:54:31,003 INFO [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:31,003 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
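The 'master:store' descriptor logged above declares four column families (info, proc, rs, state) with per-family settings: versions, bloom filter type, data block encoding, block size, in-memory flag. The local master region itself is internal, but an equivalent descriptor can be expressed with the public client builders; the sketch below does that under the assumption that the values printed in the log are the ones of interest, and uses an illustrative table name rather than 'master:store'.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor build() {
        // 'info' mirrors the logged settings: 3 versions, ROWCOL bloom, ROW_INDEX_V1, 8 KB blocks, in-memory.
        ColumnFamilyDescriptorBuilder info =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .setInMemory(true);

        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("sketch", "store")) // illustrative name; 'master:store' is internal
            .setColumnFamily(info.build());

        // 'proc', 'rs' and 'state' share the plainer settings: 1 version, ROW bloom, 64 KB blocks.
        for (String cf : new String[] { "proc", "rs", "state" }) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .build());
        }
        return builder.build();
      }

      public static void main(String[] args) {
        System.out.println(build());
      }
    }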
2024-12-06T08:54:31,004 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:54:31,004 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:31,004 INFO [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:31,004 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733475271003Disabling compacts and flushes for region at 1733475271003Disabling writes for close at 1733475271004 (+1 ms)Writing region close event to WAL at 1733475271004Closed at 1733475271004 2024-12-06T08:54:31,005 WARN [master/25494438c68b:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/.initializing 2024-12-06T08:54:31,005 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/WALs/25494438c68b,43489,1733475270773 2024-12-06T08:54:31,009 INFO [master/25494438c68b:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=25494438c68b%2C43489%2C1733475270773, suffix=, logDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/WALs/25494438c68b,43489,1733475270773, archiveDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/oldWALs, maxLogs=10 2024-12-06T08:54:31,010 INFO [master/25494438c68b:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 25494438c68b%2C43489%2C1733475270773.1733475271010 2024-12-06T08:54:31,024 INFO [master/25494438c68b:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/WALs/25494438c68b,43489,1733475270773/25494438c68b%2C43489%2C1733475270773.1733475271010 2024-12-06T08:54:31,026 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36149:36149),(127.0.0.1/127.0.0.1:44507:44507),(127.0.0.1/127.0.0.1:42041:42041)] 2024-12-06T08:54:31,026 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:54:31,026 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:54:31,027 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:31,027 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:31,029 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:31,030 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T08:54:31,031 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,031 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:31,031 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:31,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T08:54:31,033 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:54:31,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:31,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T08:54:31,037 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:54:31,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:31,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T08:54:31,040 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,041 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:54:31,041 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:31,042 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:31,042 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:31,044 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:31,044 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:31,045 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T08:54:31,047 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:54:31,049 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:54:31,050 INFO [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72435089, jitterRate=0.07936693727970123}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T08:54:31,051 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733475271027Initializing all the Stores at 1733475271028 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475271028Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733475271028Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733475271028Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733475271028Cleaning up temporary data from old regions at 1733475271044 (+16 ms)Region opened successfully at 1733475271051 (+7 ms) 2024-12-06T08:54:31,052 INFO [master/25494438c68b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T08:54:31,056 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@542c4593, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=25494438c68b/172.17.0.2:0 2024-12-06T08:54:31,058 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-06T08:54:31,058 INFO [master/25494438c68b:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T08:54:31,058 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T08:54:31,058 INFO [master/25494438c68b:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T08:54:31,059 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T08:54:31,059 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-06T08:54:31,059 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T08:54:31,062 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
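The FlushLargeStoresPolicy message above spells out its fallback: with no hbase.hregion.percolumnfamilyflush.size.lower.bound configured, it uses the region memstore flush size divided by the number of families, which is the "(32.0 M)" it reports and the flushSizeLowerBound=33554432 printed when the region opened. A short sketch of just that arithmetic, using the values from this log (class and variable names here are illustrative, not HBase's):

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long regionFlushSizeBytes = 134_217_728L; // flushSize=134217728 from the MasterRegionFlusherAndCompactor line
        int columnFamilies = 4;                   // info, proc, rs, state
        long lowerBoundBytes = regionFlushSizeBytes / columnFamilies;
        // 134217728 / 4 = 33554432 bytes = 32 MB, matching flushSizeLowerBound=33554432 in the log.
        System.out.printf("per-family flush lower bound = %d bytes (%.1f MB)%n",
            lowerBoundBytes, lowerBoundBytes / (1024.0 * 1024.0));
      }
    }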
2024-12-06T08:54:31,063 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T08:54:31,064 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-06T08:54:31,065 INFO [master/25494438c68b:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T08:54:31,066 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T08:54:31,067 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-06T08:54:31,068 INFO [master/25494438c68b:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T08:54:31,069 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T08:54:31,071 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-06T08:54:31,072 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T08:54:31,073 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T08:54:31,076 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T08:54:31,077 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T08:54:31,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:31,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:31,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:31,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-06T08:54:31,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:31,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,081 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=25494438c68b,43489,1733475270773, sessionid=0x100668939470000, setting cluster-up flag (Was=false) 2024-12-06T08:54:31,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,091 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T08:54:31,092 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=25494438c68b,43489,1733475270773 2024-12-06T08:54:31,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,102 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T08:54:31,103 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=25494438c68b,43489,1733475270773 2024-12-06T08:54:31,105 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-06T08:54:31,108 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-06T08:54:31,108 INFO [master/25494438c68b:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-06T08:54:31,108 INFO [master/25494438c68b:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
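The ZKUtil/ZKWatcher lines in this stretch ("Set watcher on znode that does not yet exist", "Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)", followed later by NodeCreated events) follow the standard ZooKeeper idiom of probing a path with exists() while leaving a watch, so the creation is delivered as an event later. A minimal sketch against the plain ZooKeeper client; the quorum address and the /hbase/running path are taken from the log, everything else is illustrative.

    import java.util.concurrent.CountDownLatch;

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Default watcher: prints every event, much like ZKWatcher's "Received ZooKeeper Event" lines.
        Watcher watcher = (WatchedEvent event) -> {
          System.out.println("event type=" + event.getType()
              + ", state=" + event.getState() + ", path=" + event.getPath());
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59435", 30_000, watcher);
        connected.await();

        // exists() returns null when the znode is absent, but still registers the watch,
        // so a later NodeCreated event for this path will reach the watcher above.
        Stat stat = zk.exists("/hbase/running", true);
        System.out.println(stat == null
            ? "znode not there yet; watch set, waiting for NodeCreated"
            : "znode already exists, version=" + stat.getVersion());
        zk.close();
      }
    }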
2024-12-06T08:54:31,108 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 25494438c68b,43489,1733475270773 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T08:54:31,110 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/25494438c68b:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:54:31,110 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/25494438c68b:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:54:31,110 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/25494438c68b:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:54:31,110 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/25494438c68b:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:54:31,110 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/25494438c68b:0, corePoolSize=10, maxPoolSize=10 2024-12-06T08:54:31,110 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,110 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/25494438c68b:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:54:31,111 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,111 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733475301111 2024-12-06T08:54:31,112 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T08:54:31,112 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T08:54:31,112 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T08:54:31,112 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T08:54:31,112 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T08:54:31,112 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T08:54:31,113 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:54:31,113 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-06T08:54:31,115 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,115 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:54:31,116 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
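The InitMetaProcedure lines above write the hbase:meta descriptor with its info/ns/rep_barrier/table families and the MultiRowMutationEndpoint coprocessor. Once the cluster is serving, that same descriptor can be read back through the public Admin API; a small sketch under the assumption of a client configuration pointed at this cluster's ZooKeeper quorum.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class MetaDescriptorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "59435"); // quorum port from the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
          // Expect the four families shown in the log: info, ns, rep_barrier, table.
          for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
            System.out.println(cf.getNameAsString()
                + " versions=" + cf.getMaxVersions()
                + " blocksize=" + cf.getBlocksize()
                + " encoding=" + cf.getDataBlockEncoding());
          }
          // Full descriptor, including table attributes and coprocessor declarations.
          System.out.println(meta);
        }
      }
    }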
2024-12-06T08:54:31,116 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T08:54:31,116 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T08:54:31,116 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T08:54:31,122 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T08:54:31,122 INFO [master/25494438c68b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T08:54:31,122 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/25494438c68b:0:becomeActiveMaster-HFileCleaner.large.0-1733475271122,5,FailOnTimeoutGroup] 2024-12-06T08:54:31,122 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/25494438c68b:0:becomeActiveMaster-HFileCleaner.small.0-1733475271122,5,FailOnTimeoutGroup] 2024-12-06T08:54:31,122 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,123 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T08:54:31,123 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,123 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
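The ChoreService lines schedule recurring maintenance with fixed periods (LogsCleaner and HFileCleaner every 600000 ms, SnapshotCleaner every 1800000 ms, ReplicationBarrierCleaner every 43200000 ms). The chore classes themselves are HBase internals; the scheduling pattern they follow can be sketched with a plain ScheduledExecutorService, with the periods taken from the log and placeholder task bodies standing in for the real cleaners.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSchedulingSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);

        // period=600000 ms in the log; the Runnable stands in for the real log cleaner chore.
        chorePool.scheduleWithFixedDelay(
            () -> System.out.println("LogsCleaner pass: scan oldWALs, delete files past their TTL"),
            0, 600_000, TimeUnit.MILLISECONDS);

        // period=1800000 ms for the snapshot cleanup chore.
        chorePool.scheduleWithFixedDelay(
            () -> System.out.println("SnapshotCleaner pass: remove expired snapshots"),
            0, 1_800_000, TimeUnit.MILLISECONDS);

        // In a real daemon the pool lives until shutdown; here we let it run briefly and stop.
        try {
          TimeUnit.SECONDS.sleep(2);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
        }
        chorePool.shutdownNow();
      }
    }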
2024-12-06T08:54:31,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741831_1007 (size=1321) 2024-12-06T08:54:31,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741831_1007 (size=1321) 2024-12-06T08:54:31,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741831_1007 (size=1321) 2024-12-06T08:54:31,132 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-06T08:54:31,132 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07 2024-12-06T08:54:31,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:54:31,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:54:31,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:54:31,142 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:54:31,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:54:31,145 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:54:31,146 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:31,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T08:54:31,148 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T08:54:31,148 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:31,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:54:31,150 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min 
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:54:31,150 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:31,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:54:31,152 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:54:31,152 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:31,153 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T08:54:31,154 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740 2024-12-06T08:54:31,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740 2024-12-06T08:54:31,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T08:54:31,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T08:54:31,157 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
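The CompactionConfiguration lines repeated for every store carry the knobs the default selection works from: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, minCompactSize:128 MB. Roughly, a store file stays a minor-compaction candidate if it is below minCompactSize, or if it is no larger than ratio times the combined size of the files that would be compacted alongside it; this is a simplified paraphrase of the ratio rule rather than the exact HBase code, and the sketch below uses made-up file sizes.

    import java.util.ArrayList;
    import java.util.List;

    public class CompactionRatioSketch {
      // Simplified paraphrase of the ratio rule logged above (ratio 1.2, minCompactSize 128 MB).
      static List<Long> selectCandidates(List<Long> fileSizes, double ratio, long minCompactSize) {
        List<Long> candidates = new ArrayList<>();
        for (int i = 0; i < fileSizes.size(); i++) {
          long size = fileSizes.get(i);
          long sumOfNewer = 0;
          for (int j = i + 1; j < fileSizes.size(); j++) {
            sumOfNewer += fileSizes.get(j);
          }
          if (size <= minCompactSize || size <= ratio * sumOfNewer) {
            candidates.add(size);
          }
        }
        return candidates;
      }

      public static void main(String[] args) {
        long mb = 1024L * 1024L;
        // Hypothetical store file sizes, oldest first: one huge file plus a tail of small ones.
        List<Long> sizes = List.of(900 * mb, 60 * mb, 40 * mb, 20 * mb);
        // The 900 MB file fails 900 <= 1.2 * (60 + 40 + 20) and exceeds minCompactSize, so it is skipped;
        // the three small files remain, which also satisfies minFilesToCompact:3.
        System.out.println(selectCandidates(sizes, 1.2, 128 * mb));
      }
    }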
2024-12-06T08:54:31,159 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T08:54:31,162 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:54:31,162 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75111296, jitterRate=0.11924552917480469}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T08:54:31,163 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733475271142Initializing all the Stores at 1733475271143 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475271143Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475271144 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733475271144Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475271144Cleaning up temporary data from old regions at 1733475271157 (+13 ms)Region opened successfully at 1733475271163 (+6 ms) 2024-12-06T08:54:31,163 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:54:31,163 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T08:54:31,163 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T08:54:31,163 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:54:31,163 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:54:31,163 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T08:54:31,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733475271163Disabling compacts and flushes for region at 1733475271163Disabling writes for close at 
1733475271163Writing region close event to WAL at 1733475271163Closed at 1733475271163 2024-12-06T08:54:31,166 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:54:31,166 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-06T08:54:31,166 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T08:54:31,168 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T08:54:31,170 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T08:54:31,199 INFO [RS:1;25494438c68b:36685 {}] regionserver.HRegionServer(746): ClusterId : eec2b7e4-db99-45f8-8bf6-2ebc91ae0b3b 2024-12-06T08:54:31,199 INFO [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(746): ClusterId : eec2b7e4-db99-45f8-8bf6-2ebc91ae0b3b 2024-12-06T08:54:31,199 INFO [RS:2;25494438c68b:32887 {}] regionserver.HRegionServer(746): ClusterId : eec2b7e4-db99-45f8-8bf6-2ebc91ae0b3b 2024-12-06T08:54:31,199 DEBUG [RS:1;25494438c68b:36685 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T08:54:31,199 DEBUG [RS:2;25494438c68b:32887 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T08:54:31,199 DEBUG [RS:0;25494438c68b:38179 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T08:54:31,202 DEBUG [RS:0;25494438c68b:38179 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T08:54:31,202 DEBUG [RS:2;25494438c68b:32887 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T08:54:31,202 DEBUG [RS:1;25494438c68b:36685 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T08:54:31,202 DEBUG [RS:2;25494438c68b:32887 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T08:54:31,202 DEBUG [RS:0;25494438c68b:38179 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T08:54:31,202 DEBUG [RS:1;25494438c68b:36685 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T08:54:31,206 DEBUG [RS:0;25494438c68b:38179 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T08:54:31,206 DEBUG [RS:1;25494438c68b:36685 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T08:54:31,206 DEBUG [RS:2;25494438c68b:32887 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T08:54:31,207 DEBUG [RS:2;25494438c68b:32887 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15234871, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=25494438c68b/172.17.0.2:0 2024-12-06T08:54:31,207 DEBUG [RS:0;25494438c68b:38179 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1818e18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=25494438c68b/172.17.0.2:0 2024-12-06T08:54:31,207 DEBUG [RS:1;25494438c68b:36685 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58f5cb72, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=25494438c68b/172.17.0.2:0 2024-12-06T08:54:31,219 DEBUG [RS:2;25494438c68b:32887 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;25494438c68b:32887 2024-12-06T08:54:31,219 DEBUG [RS:1;25494438c68b:36685 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;25494438c68b:36685 2024-12-06T08:54:31,219 INFO [RS:2;25494438c68b:32887 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T08:54:31,219 INFO [RS:2;25494438c68b:32887 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T08:54:31,219 DEBUG [RS:2;25494438c68b:32887 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-06T08:54:31,219 INFO [RS:1;25494438c68b:36685 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T08:54:31,219 INFO [RS:1;25494438c68b:36685 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T08:54:31,219 DEBUG [RS:1;25494438c68b:36685 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-06T08:54:31,220 INFO [RS:2;25494438c68b:32887 {}] regionserver.HRegionServer(2659): reportForDuty to master=25494438c68b,43489,1733475270773 with port=32887, startcode=1733475270882 2024-12-06T08:54:31,220 INFO [RS:1;25494438c68b:36685 {}] regionserver.HRegionServer(2659): reportForDuty to master=25494438c68b,43489,1733475270773 with port=36685, startcode=1733475270852 2024-12-06T08:54:31,220 DEBUG [RS:2;25494438c68b:32887 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:54:31,220 DEBUG [RS:1;25494438c68b:36685 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:54:31,223 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41473, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:54:31,223 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55687, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:54:31,223 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43489 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 25494438c68b,36685,1733475270852 2024-12-06T08:54:31,224 DEBUG [RS:0;25494438c68b:38179 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;25494438c68b:38179 2024-12-06T08:54:31,224 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43489 {}] master.ServerManager(517): Registering regionserver=25494438c68b,36685,1733475270852 2024-12-06T08:54:31,224 INFO [RS:0;25494438c68b:38179 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T08:54:31,224 INFO [RS:0;25494438c68b:38179 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T08:54:31,224 DEBUG [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-06T08:54:31,225 INFO [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(2659): reportForDuty to master=25494438c68b,43489,1733475270773 with port=38179, startcode=1733475270822 2024-12-06T08:54:31,225 DEBUG [RS:0;25494438c68b:38179 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:54:31,226 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43489 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 25494438c68b,32887,1733475270882 2024-12-06T08:54:31,226 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43489 {}] master.ServerManager(517): Registering regionserver=25494438c68b,32887,1733475270882 2024-12-06T08:54:31,226 DEBUG [RS:1;25494438c68b:36685 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07 2024-12-06T08:54:31,226 DEBUG [RS:1;25494438c68b:36685 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45531 2024-12-06T08:54:31,227 DEBUG [RS:1;25494438c68b:36685 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T08:54:31,227 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60835, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:54:31,228 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43489 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 25494438c68b,38179,1733475270822 2024-12-06T08:54:31,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:54:31,228 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43489 {}] master.ServerManager(517): Registering regionserver=25494438c68b,38179,1733475270822 2024-12-06T08:54:31,229 DEBUG [RS:2;25494438c68b:32887 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07 2024-12-06T08:54:31,229 DEBUG [RS:2;25494438c68b:32887 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45531 2024-12-06T08:54:31,229 DEBUG [RS:2;25494438c68b:32887 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T08:54:31,229 DEBUG [RS:1;25494438c68b:36685 {}] zookeeper.ZKUtil(111): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/25494438c68b,36685,1733475270852 2024-12-06T08:54:31,229 WARN [RS:1;25494438c68b:36685 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T08:54:31,230 INFO [RS:1;25494438c68b:36685 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:54:31,230 DEBUG [RS:1;25494438c68b:36685 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/WALs/25494438c68b,36685,1733475270852 2024-12-06T08:54:31,231 DEBUG [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07 2024-12-06T08:54:31,231 DEBUG [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45531 2024-12-06T08:54:31,231 DEBUG [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T08:54:31,235 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [25494438c68b,32887,1733475270882] 2024-12-06T08:54:31,235 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [25494438c68b,36685,1733475270852] 2024-12-06T08:54:31,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:54:31,237 DEBUG [RS:2;25494438c68b:32887 {}] zookeeper.ZKUtil(111): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/25494438c68b,32887,1733475270882 2024-12-06T08:54:31,237 WARN [RS:2;25494438c68b:32887 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T08:54:31,237 INFO [RS:2;25494438c68b:32887 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:54:31,237 DEBUG [RS:2;25494438c68b:32887 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/WALs/25494438c68b,32887,1733475270882 2024-12-06T08:54:31,237 DEBUG [RS:0;25494438c68b:38179 {}] zookeeper.ZKUtil(111): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/25494438c68b,38179,1733475270822 2024-12-06T08:54:31,237 WARN [RS:0;25494438c68b:38179 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T08:54:31,237 INFO [RS:0;25494438c68b:38179 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:54:31,237 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [25494438c68b,38179,1733475270822] 2024-12-06T08:54:31,238 DEBUG [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/WALs/25494438c68b,38179,1733475270822 2024-12-06T08:54:31,241 INFO [RS:1;25494438c68b:36685 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:54:31,243 INFO [RS:2;25494438c68b:32887 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:54:31,243 INFO [RS:0;25494438c68b:38179 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:54:31,244 INFO [RS:1;25494438c68b:36685 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:54:31,245 INFO [RS:1;25494438c68b:36685 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:54:31,245 INFO [RS:1;25494438c68b:36685 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,246 INFO [RS:2;25494438c68b:32887 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:54:31,247 INFO [RS:1;25494438c68b:36685 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T08:54:31,248 INFO [RS:2;25494438c68b:32887 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:54:31,248 INFO [RS:2;25494438c68b:32887 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,248 INFO [RS:1;25494438c68b:36685 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T08:54:31,248 INFO [RS:1;25494438c68b:36685 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
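
The MemStoreFlusher(131) lines above report a global memstore limit of 880 MB with a low-water mark of 836 MB (95% of the limit) on each region server. Assuming the usual sizing rule, limit = heap x hbase.regionserver.global.memstore.size and low mark = limit x hbase.regionserver.global.memstore.size.lower.limit, the following small sketch shows the arithmetic; the property names and the use of the local JVM heap are assumptions for illustration, not values read from this log. With the default 0.4 fraction, 880 MB would correspond to roughly a 2.2 GB heap.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimits {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the heap shared by all memstores, and the low-water fraction of that limit.
        float globalFraction = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4F);
        float lowerFraction  = conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95F);
        long heapBytes = Runtime.getRuntime().maxMemory();   // stand-in for the region server heap
        long globalLimit = (long) (heapBytes * globalFraction);
        long lowMark     = (long) (globalLimit * lowerFraction);
        // With the figures in the log: 880 MB * 0.95 ~= 836 MB, matching globalMemStoreLimitLowMark.
        System.out.printf("globalMemStoreLimit=%d lowMark=%d%n", globalLimit, lowMark);
      }
    }
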
2024-12-06T08:54:31,249 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,249 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,249 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,249 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,249 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,249 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/25494438c68b:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:54:31,249 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,249 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,249 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,249 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,249 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,250 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,250 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/25494438c68b:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:54:31,250 DEBUG [RS:1;25494438c68b:36685 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:54:31,252 INFO [RS:0;25494438c68b:38179 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:54:31,255 INFO [RS:2;25494438c68b:32887 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T08:54:31,255 INFO [RS:0;25494438c68b:38179 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:54:31,255 INFO [RS:0;25494438c68b:38179 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is 
enabled. 2024-12-06T08:54:31,255 INFO [RS:1;25494438c68b:36685 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,255 INFO [RS:1;25494438c68b:36685 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,256 INFO [RS:1;25494438c68b:36685 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,256 INFO [RS:1;25494438c68b:36685 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,256 INFO [RS:1;25494438c68b:36685 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,256 INFO [RS:1;25494438c68b:36685 {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,36685,1733475270852-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:54:31,256 INFO [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T08:54:31,256 INFO [RS:2;25494438c68b:32887 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T08:54:31,256 INFO [RS:2;25494438c68b:32887 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,256 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,257 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,257 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,257 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,257 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,257 INFO [RS:0;25494438c68b:38179 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T08:54:31,257 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/25494438c68b:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:54:31,257 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,257 INFO [RS:0;25494438c68b:38179 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T08:54:31,257 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,257 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,257 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,257 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,258 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,258 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,258 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,258 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,258 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,258 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,258 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/25494438c68b:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:54:31,258 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/25494438c68b:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:54:31,258 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,258 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,258 DEBUG [RS:2;25494438c68b:32887 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:54:31,258 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,258 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,258 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,258 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/25494438c68b:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:54:31,258 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/25494438c68b:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:54:31,258 DEBUG [RS:0;25494438c68b:38179 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:54:31,261 INFO [RS:2;25494438c68b:32887 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,261 INFO [RS:2;25494438c68b:32887 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,261 INFO [RS:0;25494438c68b:38179 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,261 INFO [RS:2;25494438c68b:32887 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,261 INFO [RS:0;25494438c68b:38179 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,261 INFO [RS:0;25494438c68b:38179 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,261 INFO [RS:2;25494438c68b:32887 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,261 INFO [RS:0;25494438c68b:38179 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,261 INFO [RS:2;25494438c68b:32887 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,261 INFO [RS:0;25494438c68b:38179 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,261 INFO [RS:2;25494438c68b:32887 {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,32887,1733475270882-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:54:31,261 INFO [RS:0;25494438c68b:38179 {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,38179,1733475270822-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:54:31,277 INFO [RS:2;25494438c68b:32887 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:54:31,277 INFO [RS:2;25494438c68b:32887 {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,32887,1733475270882-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,277 INFO [RS:2;25494438c68b:32887 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T08:54:31,277 INFO [RS:0;25494438c68b:38179 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:54:31,277 INFO [RS:2;25494438c68b:32887 {}] regionserver.Replication(171): 25494438c68b,32887,1733475270882 started 2024-12-06T08:54:31,277 INFO [RS:0;25494438c68b:38179 {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,38179,1733475270822-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,278 INFO [RS:0;25494438c68b:38179 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,278 INFO [RS:0;25494438c68b:38179 {}] regionserver.Replication(171): 25494438c68b,38179,1733475270822 started 2024-12-06T08:54:31,279 INFO [RS:1;25494438c68b:36685 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:54:31,279 INFO [RS:1;25494438c68b:36685 {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,36685,1733475270852-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,279 INFO [RS:1;25494438c68b:36685 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,279 INFO [RS:1;25494438c68b:36685 {}] regionserver.Replication(171): 25494438c68b,36685,1733475270852 started 2024-12-06T08:54:31,292 INFO [RS:2;25494438c68b:32887 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,292 INFO [RS:2;25494438c68b:32887 {}] regionserver.HRegionServer(1482): Serving as 25494438c68b,32887,1733475270882, RpcServer on 25494438c68b/172.17.0.2:32887, sessionid=0x100668939470003 2024-12-06T08:54:31,293 DEBUG [RS:2;25494438c68b:32887 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:54:31,293 DEBUG [RS:2;25494438c68b:32887 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 25494438c68b,32887,1733475270882 2024-12-06T08:54:31,293 DEBUG [RS:2;25494438c68b:32887 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '25494438c68b,32887,1733475270882' 2024-12-06T08:54:31,293 DEBUG [RS:2;25494438c68b:32887 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:54:31,293 INFO [RS:1;25494438c68b:36685 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T08:54:31,293 INFO [RS:1;25494438c68b:36685 {}] regionserver.HRegionServer(1482): Serving as 25494438c68b,36685,1733475270852, RpcServer on 25494438c68b/172.17.0.2:36685, sessionid=0x100668939470002 2024-12-06T08:54:31,293 DEBUG [RS:1;25494438c68b:36685 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:54:31,293 DEBUG [RS:1;25494438c68b:36685 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 25494438c68b,36685,1733475270852 2024-12-06T08:54:31,293 DEBUG [RS:1;25494438c68b:36685 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '25494438c68b,36685,1733475270852' 2024-12-06T08:54:31,293 DEBUG [RS:1;25494438c68b:36685 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:54:31,294 DEBUG [RS:2;25494438c68b:32887 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:54:31,294 DEBUG [RS:1;25494438c68b:36685 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:54:31,294 DEBUG [RS:2;25494438c68b:32887 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:54:31,294 DEBUG [RS:2;25494438c68b:32887 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:54:31,294 DEBUG [RS:2;25494438c68b:32887 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 25494438c68b,32887,1733475270882 2024-12-06T08:54:31,295 DEBUG [RS:2;25494438c68b:32887 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '25494438c68b,32887,1733475270882' 2024-12-06T08:54:31,295 DEBUG [RS:2;25494438c68b:32887 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:54:31,295 DEBUG [RS:1;25494438c68b:36685 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:54:31,295 DEBUG [RS:1;25494438c68b:36685 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:54:31,295 DEBUG [RS:1;25494438c68b:36685 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 25494438c68b,36685,1733475270852 2024-12-06T08:54:31,295 DEBUG [RS:1;25494438c68b:36685 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '25494438c68b,36685,1733475270852' 2024-12-06T08:54:31,295 DEBUG [RS:1;25494438c68b:36685 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:54:31,295 DEBUG [RS:2;25494438c68b:32887 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:54:31,296 DEBUG [RS:2;25494438c68b:32887 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:54:31,296 DEBUG [RS:1;25494438c68b:36685 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:54:31,296 INFO [RS:2;25494438c68b:32887 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:54:31,296 INFO [RS:2;25494438c68b:32887 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
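
The RegionServerRpcQuotaManager(64) and RegionServerSpaceQuotaManager(80) lines show quota support switched off for this cluster, which is the stock behaviour. For reference, a minimal sketch of the switch that governs it, assuming the usual hbase.quota.enabled key; the snippet is illustrative and is not part of this test setup.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // false by default, hence "Quota support disabled" above; setting it to true before
        // cluster start is what brings up the RPC and space quota managers.
        boolean quotasEnabled = conf.getBoolean("hbase.quota.enabled", false);
        System.out.println("quota support enabled: " + quotasEnabled);
      }
    }
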
2024-12-06T08:54:31,297 DEBUG [RS:1;25494438c68b:36685 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:54:31,297 INFO [RS:1;25494438c68b:36685 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:54:31,297 INFO [RS:1;25494438c68b:36685 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T08:54:31,299 INFO [RS:0;25494438c68b:38179 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,299 INFO [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(1482): Serving as 25494438c68b,38179,1733475270822, RpcServer on 25494438c68b/172.17.0.2:38179, sessionid=0x100668939470001 2024-12-06T08:54:31,300 DEBUG [RS:0;25494438c68b:38179 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:54:31,300 DEBUG [RS:0;25494438c68b:38179 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 25494438c68b,38179,1733475270822 2024-12-06T08:54:31,300 DEBUG [RS:0;25494438c68b:38179 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '25494438c68b,38179,1733475270822' 2024-12-06T08:54:31,300 DEBUG [RS:0;25494438c68b:38179 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:54:31,300 DEBUG [RS:0;25494438c68b:38179 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:54:31,301 DEBUG [RS:0;25494438c68b:38179 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:54:31,301 DEBUG [RS:0;25494438c68b:38179 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:54:31,301 DEBUG [RS:0;25494438c68b:38179 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 25494438c68b,38179,1733475270822 2024-12-06T08:54:31,301 DEBUG [RS:0;25494438c68b:38179 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '25494438c68b,38179,1733475270822' 2024-12-06T08:54:31,301 DEBUG [RS:0;25494438c68b:38179 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:54:31,301 DEBUG [RS:0;25494438c68b:38179 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:54:31,302 DEBUG [RS:0;25494438c68b:38179 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:54:31,302 INFO [RS:0;25494438c68b:38179 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:54:31,302 INFO [RS:0;25494438c68b:38179 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T08:54:31,320 WARN [25494438c68b:43489 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-06T08:54:31,399 INFO [RS:2;25494438c68b:32887 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=25494438c68b%2C32887%2C1733475270882, suffix=, logDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/WALs/25494438c68b,32887,1733475270882, archiveDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/oldWALs, maxLogs=32 2024-12-06T08:54:31,400 INFO [RS:1;25494438c68b:36685 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=25494438c68b%2C36685%2C1733475270852, suffix=, logDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/WALs/25494438c68b,36685,1733475270852, archiveDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/oldWALs, maxLogs=32 2024-12-06T08:54:31,402 INFO [RS:2;25494438c68b:32887 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 25494438c68b%2C32887%2C1733475270882.1733475271401 2024-12-06T08:54:31,402 INFO [RS:1;25494438c68b:36685 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 25494438c68b%2C36685%2C1733475270852.1733475271401 2024-12-06T08:54:31,405 INFO [RS:0;25494438c68b:38179 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=25494438c68b%2C38179%2C1733475270822, suffix=, logDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/WALs/25494438c68b,38179,1733475270822, archiveDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/oldWALs, maxLogs=32 2024-12-06T08:54:31,406 INFO [RS:0;25494438c68b:38179 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 25494438c68b%2C38179%2C1733475270822.1733475271406 2024-12-06T08:54:31,419 INFO [RS:1;25494438c68b:36685 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/WALs/25494438c68b,36685,1733475270852/25494438c68b%2C36685%2C1733475270852.1733475271401 2024-12-06T08:54:31,420 INFO [RS:2;25494438c68b:32887 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/WALs/25494438c68b,32887,1733475270882/25494438c68b%2C32887%2C1733475270882.1733475271401 2024-12-06T08:54:31,425 DEBUG [RS:1;25494438c68b:36685 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36149:36149),(127.0.0.1/127.0.0.1:44507:44507),(127.0.0.1/127.0.0.1:42041:42041)] 2024-12-06T08:54:31,425 INFO [RS:0;25494438c68b:38179 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/WALs/25494438c68b,38179,1733475270822/25494438c68b%2C38179%2C1733475270822.1733475271406 2024-12-06T08:54:31,425 DEBUG [RS:2;25494438c68b:32887 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44507:44507),(127.0.0.1/127.0.0.1:36149:36149),(127.0.0.1/127.0.0.1:42041:42041)] 2024-12-06T08:54:31,431 DEBUG [RS:0;25494438c68b:38179 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42041:42041),(127.0.0.1/127.0.0.1:44507:44507),(127.0.0.1/127.0.0.1:36149:36149)] 2024-12-06T08:54:31,570 DEBUG [25494438c68b:43489 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-06T08:54:31,571 DEBUG [25494438c68b:43489 {}] balancer.BalancerClusterState(204): Hosts are {25494438c68b=0} racks are {/default-rack=0} 2024-12-06T08:54:31,574 DEBUG [25494438c68b:43489 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-06T08:54:31,574 DEBUG [25494438c68b:43489 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-06T08:54:31,574 DEBUG [25494438c68b:43489 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-06T08:54:31,574 DEBUG [25494438c68b:43489 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-06T08:54:31,574 DEBUG [25494438c68b:43489 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-06T08:54:31,574 DEBUG [25494438c68b:43489 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-06T08:54:31,574 INFO [25494438c68b:43489 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-06T08:54:31,574 INFO [25494438c68b:43489 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-06T08:54:31,574 INFO [25494438c68b:43489 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-06T08:54:31,574 DEBUG [25494438c68b:43489 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-06T08:54:31,574 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=25494438c68b,38179,1733475270822 2024-12-06T08:54:31,576 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 25494438c68b,38179,1733475270822, state=OPENING 2024-12-06T08:54:31,578 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T08:54:31,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:31,581 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T08:54:31,581 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:31,581 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:31,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=25494438c68b,38179,1733475270822}] 2024-12-06T08:54:31,581 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:31,581 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:31,735 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T08:54:31,737 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49519, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T08:54:31,742 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-06T08:54:31,743 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:54:31,746 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=25494438c68b%2C38179%2C1733475270822.meta, suffix=.meta, logDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/WALs/25494438c68b,38179,1733475270822, archiveDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/oldWALs, maxLogs=32 2024-12-06T08:54:31,746 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 25494438c68b%2C38179%2C1733475270822.meta.1733475271746.meta 2024-12-06T08:54:31,756 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/WALs/25494438c68b,38179,1733475270822/25494438c68b%2C38179%2C1733475270822.meta.1733475271746.meta 2024-12-06T08:54:31,760 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36149:36149),(127.0.0.1/127.0.0.1:44507:44507),(127.0.0.1/127.0.0.1:42041:42041)] 2024-12-06T08:54:31,761 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:54:31,762 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T08:54:31,762 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T08:54:31,762 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
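
The wal.AbstractFSWAL(613) entries, here for the meta WAL and earlier for the three region server WALs, show the WAL sizing in effect: blocksize=256 MB, rollsize=128 MB, maxLogs=32, with FSHLogProvider as the provider. A minimal sketch of the corresponding knobs follows, assuming the usual property names hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs; the fallback values are taken from the figures in the log (256 MB x 0.5 = 128 MB roll size), not from HBase defaults.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSizing {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fallbacks mirror the figures reported by AbstractFSWAL(613) in this run.
        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        float rollMultiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5F);
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
        long rollSize = (long) (blockSize * rollMultiplier);   // 256 MB * 0.5 = 128 MB, as logged
        System.out.printf("blocksize=%d rollsize=%d maxLogs=%d%n", blockSize, rollSize, maxLogs);
      }
    }
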
2024-12-06T08:54:31,762 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T08:54:31,762 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:54:31,762 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-06T08:54:31,762 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-06T08:54:31,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:54:31,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:54:31,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:31,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T08:54:31,767 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T08:54:31,767 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:31,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:54:31,769 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:54:31,769 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:54:31,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:54:31,770 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:54:31,770 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,770 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
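
The HStore(327)/HStore(400) lines above, together with the descriptor dumps in the surrounding open and close journals, show each hbase:meta family configured with DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true' and an 8 KB block size (64 KB for rep_barrier). As a rough illustration only, here is a client-side sketch of declaring an equivalent family with ColumnFamilyDescriptorBuilder; the builder calls are standard HBase 2.x client API, but this is not how the meta descriptor is produced in the test.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamily {
      public static void main(String[] args) {
        // Mirrors the attributes printed for the 'info' family of hbase:meta in the journal above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .build();
        System.out.println(info);
      }
    }
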
2024-12-06T08:54:31,771 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T08:54:31,771 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740 2024-12-06T08:54:31,773 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740 2024-12-06T08:54:31,774 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T08:54:31,774 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T08:54:31,775 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T08:54:31,776 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T08:54:31,777 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75227294, jitterRate=0.12097403407096863}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T08:54:31,778 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-06T08:54:31,779 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733475271763Writing region info on filesystem at 1733475271763Initializing all the Stores at 1733475271764 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475271764Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475271764Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733475271764Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733475271764Cleaning up temporary data from old regions at 1733475271774 (+10 ms)Running coprocessor post-open hooks at 1733475271778 (+4 ms)Region opened successfully at 1733475271779 (+1 ms) 2024-12-06T08:54:31,780 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733475271735 2024-12-06T08:54:31,784 DEBUG [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T08:54:31,784 INFO [RS_OPEN_META-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-06T08:54:31,785 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=25494438c68b,38179,1733475270822 2024-12-06T08:54:31,787 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 25494438c68b,38179,1733475270822, state=OPEN 2024-12-06T08:54:31,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:54:31,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:54:31,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:54:31,789 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=25494438c68b,38179,1733475270822 2024-12-06T08:54:31,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:54:31,789 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:31,789 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:31,789 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:31,789 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:54:31,794 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T08:54:31,794 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=25494438c68b,38179,1733475270822 in 208 msec 2024-12-06T08:54:31,802 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T08:54:31,802 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 628 msec 2024-12-06T08:54:31,803 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:54:31,803 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-06T08:54:31,806 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T08:54:31,806 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=25494438c68b,38179,1733475270822, seqNum=-1] 2024-12-06T08:54:31,806 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:54:31,808 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54801, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:54:31,817 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 709 msec 2024-12-06T08:54:31,817 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733475271817, completionTime=-1 2024-12-06T08:54:31,818 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-06T08:54:31,818 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-06T08:54:31,821 INFO [master/25494438c68b:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-06T08:54:31,821 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733475331821 2024-12-06T08:54:31,821 INFO [master/25494438c68b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733475391821 2024-12-06T08:54:31,821 INFO [master/25494438c68b:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-06T08:54:31,821 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,43489,1733475270773-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,821 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,43489,1733475270773-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,822 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,43489,1733475270773-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,822 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-25494438c68b:43489, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,822 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,823 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,825 DEBUG [master/25494438c68b:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-06T08:54:31,829 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.912sec 2024-12-06T08:54:31,833 INFO [master/25494438c68b:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T08:54:31,833 INFO [master/25494438c68b:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T08:54:31,833 INFO [master/25494438c68b:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T08:54:31,833 INFO [master/25494438c68b:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T08:54:31,833 INFO [master/25494438c68b:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T08:54:31,833 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,43489,1733475270773-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-06T08:54:31,833 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,43489,1733475270773-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T08:54:31,836 DEBUG [master/25494438c68b:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-06T08:54:31,836 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T08:54:31,836 INFO [master/25494438c68b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=25494438c68b,43489,1733475270773-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:54:31,899 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@319a90d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:54:31,899 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 25494438c68b,43489,-1 for getting cluster id 2024-12-06T08:54:31,900 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-06T08:54:31,901 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'eec2b7e4-db99-45f8-8bf6-2ebc91ae0b3b' 2024-12-06T08:54:31,902 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-06T08:54:31,902 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "eec2b7e4-db99-45f8-8bf6-2ebc91ae0b3b" 2024-12-06T08:54:31,903 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e01ce6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:54:31,903 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [25494438c68b,43489,-1] 2024-12-06T08:54:31,903 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-06T08:54:31,904 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:31,905 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54936, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-06T08:54:31,907 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b719fd5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:54:31,907 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T08:54:31,909 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=25494438c68b,38179,1733475270822, seqNum=-1] 2024-12-06T08:54:31,909 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:54:31,911 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36294, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:54:31,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=25494438c68b,43489,1733475270773 2024-12-06T08:54:31,914 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-06T08:54:31,915 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 25494438c68b,43489,1733475270773 2024-12-06T08:54:31,915 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@98df274 2024-12-06T08:54:31,915 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:54:31,917 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54952, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:54:31,918 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43489 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:54:31,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43489 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-06T08:54:31,922 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:54:31,922 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:31,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43489 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-06T08:54:31,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T08:54:31,924 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:54:31,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741837_1013 (size=392) 
2024-12-06T08:54:31,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741837_1013 (size=392) 2024-12-06T08:54:31,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741837_1013 (size=392) 2024-12-06T08:54:31,936 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8bf250a2ae458760d1ca550401bce3d0, NAME => 'TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07 2024-12-06T08:54:31,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741838_1014 (size=51) 2024-12-06T08:54:31,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741838_1014 (size=51) 2024-12-06T08:54:31,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741838_1014 (size=51) 2024-12-06T08:54:31,948 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:54:31,948 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 8bf250a2ae458760d1ca550401bce3d0, disabling compactions & flushes 2024-12-06T08:54:31,948 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 2024-12-06T08:54:31,948 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 2024-12-06T08:54:31,948 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. after waiting 0 ms 2024-12-06T08:54:31,948 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 2024-12-06T08:54:31,948 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 
2024-12-06T08:54:31,948 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8bf250a2ae458760d1ca550401bce3d0: Waiting for close lock at 1733475271948Disabling compacts and flushes for region at 1733475271948Disabling writes for close at 1733475271948Writing region close event to WAL at 1733475271948Closed at 1733475271948 2024-12-06T08:54:31,950 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:54:31,951 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733475271950"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733475271950"}]},"ts":"1733475271950"} 2024-12-06T08:54:31,954 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-06T08:54:31,956 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:54:31,956 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733475271956"}]},"ts":"1733475271956"} 2024-12-06T08:54:31,959 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-06T08:54:31,959 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {25494438c68b=0} racks are {/default-rack=0} 2024-12-06T08:54:31,960 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-06T08:54:31,960 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-06T08:54:31,960 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-06T08:54:31,960 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-06T08:54:31,960 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-06T08:54:31,960 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-06T08:54:31,960 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-06T08:54:31,960 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-06T08:54:31,960 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-06T08:54:31,960 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-06T08:54:31,961 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8bf250a2ae458760d1ca550401bce3d0, ASSIGN}] 2024-12-06T08:54:31,963 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8bf250a2ae458760d1ca550401bce3d0, ASSIGN 2024-12-06T08:54:31,964 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8bf250a2ae458760d1ca550401bce3d0, ASSIGN; state=OFFLINE, location=25494438c68b,38179,1733475270822; forceNewPlan=false, retain=false 2024-12-06T08:54:32,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T08:54:32,115 INFO [25494438c68b:43489 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-06T08:54:32,115 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8bf250a2ae458760d1ca550401bce3d0, regionState=OPENING, regionLocation=25494438c68b,38179,1733475270822 2024-12-06T08:54:32,119 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8bf250a2ae458760d1ca550401bce3d0, ASSIGN because future has completed 2024-12-06T08:54:32,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8bf250a2ae458760d1ca550401bce3d0, server=25494438c68b,38179,1733475270822}] 2024-12-06T08:54:32,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T08:54:32,280 INFO [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 
2024-12-06T08:54:32,280 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8bf250a2ae458760d1ca550401bce3d0, NAME => 'TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:54:32,281 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 8bf250a2ae458760d1ca550401bce3d0 2024-12-06T08:54:32,281 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:54:32,281 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8bf250a2ae458760d1ca550401bce3d0 2024-12-06T08:54:32,281 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8bf250a2ae458760d1ca550401bce3d0 2024-12-06T08:54:32,283 INFO [StoreOpener-8bf250a2ae458760d1ca550401bce3d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8bf250a2ae458760d1ca550401bce3d0 2024-12-06T08:54:32,285 INFO [StoreOpener-8bf250a2ae458760d1ca550401bce3d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8bf250a2ae458760d1ca550401bce3d0 columnFamilyName cf 2024-12-06T08:54:32,285 DEBUG [StoreOpener-8bf250a2ae458760d1ca550401bce3d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:54:32,285 INFO [StoreOpener-8bf250a2ae458760d1ca550401bce3d0-1 {}] regionserver.HStore(327): Store=8bf250a2ae458760d1ca550401bce3d0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:54:32,285 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8bf250a2ae458760d1ca550401bce3d0 2024-12-06T08:54:32,286 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/default/TestHBaseWalOnEC/8bf250a2ae458760d1ca550401bce3d0 2024-12-06T08:54:32,287 DEBUG 
[RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/default/TestHBaseWalOnEC/8bf250a2ae458760d1ca550401bce3d0 2024-12-06T08:54:32,287 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8bf250a2ae458760d1ca550401bce3d0 2024-12-06T08:54:32,287 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8bf250a2ae458760d1ca550401bce3d0 2024-12-06T08:54:32,289 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8bf250a2ae458760d1ca550401bce3d0 2024-12-06T08:54:32,291 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/default/TestHBaseWalOnEC/8bf250a2ae458760d1ca550401bce3d0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:54:32,292 INFO [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8bf250a2ae458760d1ca550401bce3d0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66509415, jitterRate=-0.008932486176490784}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:54:32,292 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8bf250a2ae458760d1ca550401bce3d0 2024-12-06T08:54:32,293 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8bf250a2ae458760d1ca550401bce3d0: Running coprocessor pre-open hook at 1733475272281Writing region info on filesystem at 1733475272281Initializing all the Stores at 1733475272283 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733475272283Cleaning up temporary data from old regions at 1733475272287 (+4 ms)Running coprocessor post-open hooks at 1733475272292 (+5 ms)Region opened successfully at 1733475272293 (+1 ms) 2024-12-06T08:54:32,295 INFO [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0., pid=6, masterSystemTime=1733475272274 2024-12-06T08:54:32,298 DEBUG [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 2024-12-06T08:54:32,298 INFO [RS_OPEN_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 
2024-12-06T08:54:32,300 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8bf250a2ae458760d1ca550401bce3d0, regionState=OPEN, openSeqNum=2, regionLocation=25494438c68b,38179,1733475270822 2024-12-06T08:54:32,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8bf250a2ae458760d1ca550401bce3d0, server=25494438c68b,38179,1733475270822 because future has completed 2024-12-06T08:54:32,309 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T08:54:32,309 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8bf250a2ae458760d1ca550401bce3d0, server=25494438c68b,38179,1733475270822 in 185 msec 2024-12-06T08:54:32,313 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T08:54:32,313 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8bf250a2ae458760d1ca550401bce3d0, ASSIGN in 348 msec 2024-12-06T08:54:32,315 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:54:32,315 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733475272315"}]},"ts":"1733475272315"} 2024-12-06T08:54:32,319 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-06T08:54:32,320 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:54:32,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 402 msec 2024-12-06T08:54:32,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T08:54:32,548 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-06T08:54:32,548 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-06T08:54:32,548 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T08:54:32,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-06T08:54:32,552 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T08:54:32,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-12-06T08:54:32,555 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0., hostname=25494438c68b,38179,1733475270822, seqNum=2] 2024-12-06T08:54:32,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43489 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-06T08:54:32,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43489 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-06T08:54:32,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T08:54:32,563 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:54:32,565 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:54:32,565 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:54:32,625 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T08:54:32,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:54:32,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T08:54:32,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:54:32,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:54:32,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38179 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-06T08:54:32,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 
2024-12-06T08:54:32,721 INFO [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 8bf250a2ae458760d1ca550401bce3d0 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-06T08:54:32,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/default/TestHBaseWalOnEC/8bf250a2ae458760d1ca550401bce3d0/.tmp/cf/488df5c4129c4a678b91f3317da821c5 is 36, key is row/cf:cq/1733475272557/Put/seqid=0 2024-12-06T08:54:32,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741839_1015 (size=4787) 2024-12-06T08:54:32,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741839_1015 (size=4787) 2024-12-06T08:54:32,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741839_1015 (size=4787) 2024-12-06T08:54:32,751 INFO [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/default/TestHBaseWalOnEC/8bf250a2ae458760d1ca550401bce3d0/.tmp/cf/488df5c4129c4a678b91f3317da821c5 2024-12-06T08:54:32,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/default/TestHBaseWalOnEC/8bf250a2ae458760d1ca550401bce3d0/.tmp/cf/488df5c4129c4a678b91f3317da821c5 as hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/default/TestHBaseWalOnEC/8bf250a2ae458760d1ca550401bce3d0/cf/488df5c4129c4a678b91f3317da821c5 2024-12-06T08:54:32,770 INFO [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/default/TestHBaseWalOnEC/8bf250a2ae458760d1ca550401bce3d0/cf/488df5c4129c4a678b91f3317da821c5, entries=1, sequenceid=5, filesize=4.7 K 2024-12-06T08:54:32,771 INFO [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 8bf250a2ae458760d1ca550401bce3d0 in 50ms, sequenceid=5, compaction requested=false 2024-12-06T08:54:32,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 8bf250a2ae458760d1ca550401bce3d0: 2024-12-06T08:54:32,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 
2024-12-06T08:54:32,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/25494438c68b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-06T08:54:32,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43489 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-06T08:54:32,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-06T08:54:32,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 209 msec 2024-12-06T08:54:32,782 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 220 msec 2024-12-06T08:54:32,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43489 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T08:54:32,878 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-06T08:54:32,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-06T08:54:32,883 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-06T08:54:32,883 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:54:32,883 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:32,884 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:32,884 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-06T08:54:32,884 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T08:54:32,884 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1546559215, stopped=false 2024-12-06T08:54:32,884 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=25494438c68b,43489,1733475270773 2024-12-06T08:54:32,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:32,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:32,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:32,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:32,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:32,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, 
quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:54:32,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:32,887 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T08:54:32,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:32,887 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-06T08:54:32,887 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:54:32,888 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:32,888 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:32,888 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:32,888 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '25494438c68b,38179,1733475270822' ***** 2024-12-06T08:54:32,888 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T08:54:32,888 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '25494438c68b,36685,1733475270852' ***** 2024-12-06T08:54:32,888 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T08:54:32,888 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '25494438c68b,32887,1733475270882' ***** 2024-12-06T08:54:32,888 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T08:54:32,888 INFO [RS:1;25494438c68b:36685 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:54:32,888 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T08:54:32,889 INFO [RS:0;25494438c68b:38179 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:54:32,889 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T08:54:32,889 INFO [RS:0;25494438c68b:38179 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:54:32,889 INFO [RS:0;25494438c68b:38179 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-06T08:54:32,889 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:32,889 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:54:32,889 INFO [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(3091): Received CLOSE for 8bf250a2ae458760d1ca550401bce3d0 2024-12-06T08:54:32,889 INFO [RS:2;25494438c68b:32887 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:54:32,889 INFO [RS:1;25494438c68b:36685 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:54:32,889 INFO [RS:1;25494438c68b:36685 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T08:54:32,889 INFO [RS:1;25494438c68b:36685 {}] regionserver.HRegionServer(959): stopping server 25494438c68b,36685,1733475270852 2024-12-06T08:54:32,889 INFO [RS:1;25494438c68b:36685 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T08:54:32,889 INFO [RS:2;25494438c68b:32887 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:54:32,890 INFO [RS:1;25494438c68b:36685 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;25494438c68b:36685. 2024-12-06T08:54:32,890 INFO [RS:2;25494438c68b:32887 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T08:54:32,890 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T08:54:32,890 INFO [RS:2;25494438c68b:32887 {}] regionserver.HRegionServer(959): stopping server 25494438c68b,32887,1733475270882 2024-12-06T08:54:32,890 INFO [RS:2;25494438c68b:32887 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T08:54:32,890 INFO [RS:2;25494438c68b:32887 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;25494438c68b:32887. 
2024-12-06T08:54:32,890 DEBUG [RS:1;25494438c68b:36685 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:54:32,890 DEBUG [RS:1;25494438c68b:36685 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:32,890 INFO [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(959): stopping server 25494438c68b,38179,1733475270822 2024-12-06T08:54:32,890 INFO [RS:0;25494438c68b:38179 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T08:54:32,890 DEBUG [RS:2;25494438c68b:32887 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:54:32,890 INFO [RS:1;25494438c68b:36685 {}] regionserver.HRegionServer(976): stopping server 25494438c68b,36685,1733475270852; all regions closed. 2024-12-06T08:54:32,890 INFO [RS:0;25494438c68b:38179 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;25494438c68b:38179. 
2024-12-06T08:54:32,890 DEBUG [RS:2;25494438c68b:32887 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:32,890 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8bf250a2ae458760d1ca550401bce3d0, disabling compactions & flushes 2024-12-06T08:54:32,890 DEBUG [RS:0;25494438c68b:38179 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T08:54:32,890 INFO [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 2024-12-06T08:54:32,890 INFO [RS:2;25494438c68b:32887 {}] regionserver.HRegionServer(976): stopping server 25494438c68b,32887,1733475270882; all regions closed. 2024-12-06T08:54:32,890 DEBUG [RS:0;25494438c68b:38179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:32,891 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 2024-12-06T08:54:32,891 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. after waiting 0 ms 2024-12-06T08:54:32,891 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 2024-12-06T08:54:32,891 INFO [RS:0;25494438c68b:38179 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T08:54:32,891 INFO [RS:0;25494438c68b:38179 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T08:54:32,891 INFO [RS:0;25494438c68b:38179 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-06T08:54:32,891 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:32,891 INFO [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-06T08:54:32,891 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:32,891 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:32,891 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:32,891 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:32,891 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:32,891 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:32,891 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:32,892 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:32,892 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:32,892 INFO [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-06T08:54:32,892 DEBUG [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(1325): Online Regions={8bf250a2ae458760d1ca550401bce3d0=TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0., 1588230740=hbase:meta,,1.1588230740} 2024-12-06T08:54:32,892 DEBUG [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8bf250a2ae458760d1ca550401bce3d0 2024-12-06T08:54:32,892 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:54:32,892 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T08:54:32,892 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T08:54:32,892 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:54:32,892 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:54:32,892 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-06T08:54:32,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741834_1010 (size=93) 2024-12-06T08:54:32,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741834_1010 (size=93) 2024-12-06T08:54:32,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741834_1010 (size=93) 2024-12-06T08:54:32,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741833_1009 (size=93) 2024-12-06T08:54:32,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741833_1009 (size=93) 2024-12-06T08:54:32,901 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741833_1009 (size=93) 2024-12-06T08:54:32,905 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/default/TestHBaseWalOnEC/8bf250a2ae458760d1ca550401bce3d0/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-06T08:54:32,905 DEBUG [RS:1;25494438c68b:36685 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/oldWALs 2024-12-06T08:54:32,905 INFO [RS:1;25494438c68b:36685 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 25494438c68b%2C36685%2C1733475270852:(num 1733475271401) 2024-12-06T08:54:32,905 DEBUG [RS:1;25494438c68b:36685 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:32,905 INFO [RS:1;25494438c68b:36685 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:54:32,905 INFO [RS:1;25494438c68b:36685 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T08:54:32,906 DEBUG [RS:2;25494438c68b:32887 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/oldWALs 2024-12-06T08:54:32,906 INFO [RS:2;25494438c68b:32887 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 25494438c68b%2C32887%2C1733475270882:(num 1733475271401) 2024-12-06T08:54:32,906 DEBUG [RS:2;25494438c68b:32887 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:32,906 INFO [RS:2;25494438c68b:32887 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:54:32,906 INFO [RS:1;25494438c68b:36685 {}] hbase.ChoreService(370): Chore service for: regionserver/25494438c68b:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T08:54:32,906 INFO [RS:2;25494438c68b:32887 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T08:54:32,906 INFO [RS:1;25494438c68b:36685 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T08:54:32,906 INFO [regionserver/25494438c68b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T08:54:32,906 INFO [RS:1;25494438c68b:36685 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T08:54:32,906 INFO [RS:1;25494438c68b:36685 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T08:54:32,906 INFO [RS:1;25494438c68b:36685 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T08:54:32,906 INFO [RS:2;25494438c68b:32887 {}] hbase.ChoreService(370): Chore service for: regionserver/25494438c68b:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-06T08:54:32,906 INFO [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 
2024-12-06T08:54:32,906 INFO [RS:1;25494438c68b:36685 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36685 2024-12-06T08:54:32,906 INFO [RS:2;25494438c68b:32887 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T08:54:32,906 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8bf250a2ae458760d1ca550401bce3d0: Waiting for close lock at 1733475272890Running coprocessor pre-close hooks at 1733475272890Disabling compacts and flushes for region at 1733475272890Disabling writes for close at 1733475272891 (+1 ms)Writing region close event to WAL at 1733475272896 (+5 ms)Running coprocessor post-close hooks at 1733475272906 (+10 ms)Closed at 1733475272906 2024-12-06T08:54:32,906 INFO [regionserver/25494438c68b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T08:54:32,906 DEBUG [RS_CLOSE_REGION-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0. 2024-12-06T08:54:32,906 INFO [RS:2;25494438c68b:32887 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T08:54:32,907 INFO [RS:2;25494438c68b:32887 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T08:54:32,907 INFO [RS:2;25494438c68b:32887 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T08:54:32,907 INFO [RS:2;25494438c68b:32887 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32887 2024-12-06T08:54:32,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:54:32,909 INFO [RS:1;25494438c68b:36685 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T08:54:32,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/25494438c68b,36685,1733475270852 2024-12-06T08:54:32,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/25494438c68b,32887,1733475270882 2024-12-06T08:54:32,910 INFO [RS:2;25494438c68b:32887 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T08:54:32,912 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [25494438c68b,32887,1733475270882] 2024-12-06T08:54:32,914 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/25494438c68b,32887,1733475270882 already deleted, retry=false 2024-12-06T08:54:32,914 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 25494438c68b,32887,1733475270882 expired; onlineServers=2 2024-12-06T08:54:32,914 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [25494438c68b,36685,1733475270852] 2024-12-06T08:54:32,915 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/25494438c68b,36685,1733475270852 already deleted, retry=false 
2024-12-06T08:54:32,915 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 25494438c68b,36685,1733475270852 expired; onlineServers=1 2024-12-06T08:54:32,924 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/.tmp/info/efc1536b47f442f2b36b1f76d53abe7a is 153, key is TestHBaseWalOnEC,,1733475271917.8bf250a2ae458760d1ca550401bce3d0./info:regioninfo/1733475272299/Put/seqid=0 2024-12-06T08:54:32,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741840_1016 (size=6637) 2024-12-06T08:54:32,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741840_1016 (size=6637) 2024-12-06T08:54:32,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741840_1016 (size=6637) 2024-12-06T08:54:32,936 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/.tmp/info/efc1536b47f442f2b36b1f76d53abe7a 2024-12-06T08:54:32,957 INFO [regionserver/25494438c68b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:54:32,963 INFO [regionserver/25494438c68b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:54:32,963 INFO [regionserver/25494438c68b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:54:32,963 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/.tmp/ns/806a69ced35743fbbbd8d532837481da is 43, key is default/ns:d/1733475271809/Put/seqid=0 2024-12-06T08:54:32,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741841_1017 (size=5153) 2024-12-06T08:54:32,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741841_1017 (size=5153) 2024-12-06T08:54:32,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741841_1017 (size=5153) 2024-12-06T08:54:32,975 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/.tmp/ns/806a69ced35743fbbbd8d532837481da 2024-12-06T08:54:32,998 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/.tmp/table/bb8068448a38451db479038fbd13d206 is 52, key is TestHBaseWalOnEC/table:state/1733475272315/Put/seqid=0 2024-12-06T08:54:33,007 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741842_1018 (size=5249) 2024-12-06T08:54:33,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741842_1018 (size=5249) 2024-12-06T08:54:33,008 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/.tmp/table/bb8068448a38451db479038fbd13d206 2024-12-06T08:54:33,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741842_1018 (size=5249) 2024-12-06T08:54:33,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:33,012 INFO [RS:1;25494438c68b:36685 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T08:54:33,012 INFO [RS:1;25494438c68b:36685 {}] regionserver.HRegionServer(1031): Exiting; stopping=25494438c68b,36685,1733475270852; zookeeper connection closed. 2024-12-06T08:54:33,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36685-0x100668939470002, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:33,013 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6f787d47 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6f787d47 2024-12-06T08:54:33,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:33,013 INFO [RS:2;25494438c68b:32887 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T08:54:33,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32887-0x100668939470003, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:33,013 INFO [RS:2;25494438c68b:32887 {}] regionserver.HRegionServer(1031): Exiting; stopping=25494438c68b,32887,1733475270882; zookeeper connection closed. 
2024-12-06T08:54:33,014 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@a6d4283 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@a6d4283 2024-12-06T08:54:33,018 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/.tmp/info/efc1536b47f442f2b36b1f76d53abe7a as hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/info/efc1536b47f442f2b36b1f76d53abe7a 2024-12-06T08:54:33,028 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/info/efc1536b47f442f2b36b1f76d53abe7a, entries=10, sequenceid=11, filesize=6.5 K 2024-12-06T08:54:33,030 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/.tmp/ns/806a69ced35743fbbbd8d532837481da as hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/ns/806a69ced35743fbbbd8d532837481da 2024-12-06T08:54:33,040 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/ns/806a69ced35743fbbbd8d532837481da, entries=2, sequenceid=11, filesize=5.0 K 2024-12-06T08:54:33,041 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/.tmp/table/bb8068448a38451db479038fbd13d206 as hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/table/bb8068448a38451db479038fbd13d206 2024-12-06T08:54:33,054 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/table/bb8068448a38451db479038fbd13d206, entries=2, sequenceid=11, filesize=5.1 K 2024-12-06T08:54:33,056 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 164ms, sequenceid=11, compaction requested=false 2024-12-06T08:54:33,065 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-06T08:54:33,066 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T08:54:33,066 INFO [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T08:54:33,066 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733475272892Running coprocessor pre-close hooks at 1733475272892Disabling compacts and flushes for region at 1733475272892Disabling writes for close at 1733475272892Obtaining lock to block concurrent updates at 1733475272892Preparing flush snapshotting stores in 1588230740 at 1733475272892Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733475272893 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733475272894 (+1 ms)Flushing 1588230740/info: creating writer at 1733475272894Flushing 1588230740/info: appending metadata at 1733475272923 (+29 ms)Flushing 1588230740/info: closing flushed file at 1733475272923Flushing 1588230740/ns: creating writer at 1733475272946 (+23 ms)Flushing 1588230740/ns: appending metadata at 1733475272962 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733475272962Flushing 1588230740/table: creating writer at 1733475272982 (+20 ms)Flushing 1588230740/table: appending metadata at 1733475272998 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733475272998Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@472ba814: reopening flushed file at 1733475273017 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@428fc90d: reopening flushed file at 1733475273029 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b7fb2c: reopening flushed file at 1733475273040 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 164ms, sequenceid=11, compaction requested=false at 1733475273056 (+16 ms)Writing region close event to WAL at 1733475273058 (+2 ms)Running coprocessor post-close hooks at 1733475273066 (+8 ms)Closed at 1733475273066 2024-12-06T08:54:33,066 DEBUG [RS_CLOSE_META-regionserver/25494438c68b:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T08:54:33,092 INFO [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(976): stopping server 25494438c68b,38179,1733475270822; all regions closed. 
2024-12-06T08:54:33,093 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,093 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,093 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,093 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,094 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741836_1012 (size=2751) 2024-12-06T08:54:33,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741836_1012 (size=2751) 2024-12-06T08:54:33,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741836_1012 (size=2751) 2024-12-06T08:54:33,102 DEBUG [RS:0;25494438c68b:38179 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/oldWALs 2024-12-06T08:54:33,102 INFO [RS:0;25494438c68b:38179 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 25494438c68b%2C38179%2C1733475270822.meta:.meta(num 1733475271746) 2024-12-06T08:54:33,102 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,102 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,102 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,103 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,103 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741835_1011 (size=1298) 2024-12-06T08:54:33,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741835_1011 (size=1298) 2024-12-06T08:54:33,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741835_1011 (size=1298) 2024-12-06T08:54:33,110 DEBUG [RS:0;25494438c68b:38179 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/oldWALs 2024-12-06T08:54:33,110 INFO [RS:0;25494438c68b:38179 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 25494438c68b%2C38179%2C1733475270822:(num 1733475271406) 2024-12-06T08:54:33,110 DEBUG [RS:0;25494438c68b:38179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:54:33,110 INFO [RS:0;25494438c68b:38179 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:54:33,110 INFO [RS:0;25494438c68b:38179 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T08:54:33,110 INFO [RS:0;25494438c68b:38179 {}] hbase.ChoreService(370): Chore service for: regionserver/25494438c68b:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T08:54:33,110 INFO [RS:0;25494438c68b:38179 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T08:54:33,110 INFO [regionserver/25494438c68b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-06T08:54:33,111 INFO [RS:0;25494438c68b:38179 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38179 2024-12-06T08:54:33,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/25494438c68b,38179,1733475270822 2024-12-06T08:54:33,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:54:33,113 INFO [RS:0;25494438c68b:38179 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T08:54:33,115 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [25494438c68b,38179,1733475270822] 2024-12-06T08:54:33,117 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/25494438c68b,38179,1733475270822 already deleted, retry=false 2024-12-06T08:54:33,117 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 25494438c68b,38179,1733475270822 expired; onlineServers=0 2024-12-06T08:54:33,117 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '25494438c68b,43489,1733475270773' ***** 2024-12-06T08:54:33,117 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T08:54:33,117 INFO [M:0;25494438c68b:43489 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T08:54:33,117 INFO [M:0;25494438c68b:43489 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T08:54:33,118 DEBUG [M:0;25494438c68b:43489 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T08:54:33,118 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T08:54:33,118 DEBUG [M:0;25494438c68b:43489 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T08:54:33,118 DEBUG [master/25494438c68b:0:becomeActiveMaster-HFileCleaner.small.0-1733475271122 {}] cleaner.HFileCleaner(306): Exit Thread[master/25494438c68b:0:becomeActiveMaster-HFileCleaner.small.0-1733475271122,5,FailOnTimeoutGroup] 2024-12-06T08:54:33,118 DEBUG [master/25494438c68b:0:becomeActiveMaster-HFileCleaner.large.0-1733475271122 {}] cleaner.HFileCleaner(306): Exit Thread[master/25494438c68b:0:becomeActiveMaster-HFileCleaner.large.0-1733475271122,5,FailOnTimeoutGroup] 2024-12-06T08:54:33,118 INFO [M:0;25494438c68b:43489 {}] hbase.ChoreService(370): Chore service for: master/25494438c68b:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-06T08:54:33,118 INFO [M:0;25494438c68b:43489 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T08:54:33,118 DEBUG [M:0;25494438c68b:43489 {}] master.HMaster(1795): Stopping service threads 2024-12-06T08:54:33,118 INFO [M:0;25494438c68b:43489 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T08:54:33,119 INFO [M:0;25494438c68b:43489 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T08:54:33,119 INFO [M:0;25494438c68b:43489 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T08:54:33,119 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T08:54:33,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T08:54:33,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:54:33,121 DEBUG [M:0;25494438c68b:43489 {}] zookeeper.ZKUtil(347): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T08:54:33,121 WARN [M:0;25494438c68b:43489 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T08:54:33,122 INFO [M:0;25494438c68b:43489 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/.lastflushedseqids 2024-12-06T08:54:33,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741843_1019 (size=127) 2024-12-06T08:54:33,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741843_1019 (size=127) 2024-12-06T08:54:33,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741843_1019 (size=127) 2024-12-06T08:54:33,140 INFO [M:0;25494438c68b:43489 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-06T08:54:33,140 INFO [M:0;25494438c68b:43489 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T08:54:33,141 DEBUG [M:0;25494438c68b:43489 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:54:33,141 INFO [M:0;25494438c68b:43489 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:33,141 DEBUG [M:0;25494438c68b:43489 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:33,141 DEBUG [M:0;25494438c68b:43489 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:54:33,141 DEBUG [M:0;25494438c68b:43489 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:33,141 INFO [M:0;25494438c68b:43489 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-06T08:54:33,162 DEBUG [M:0;25494438c68b:43489 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/998a407b47bc46f79f17db8ec5d1c01d is 82, key is hbase:meta,,1/info:regioninfo/1733475271785/Put/seqid=0 2024-12-06T08:54:33,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741844_1020 (size=5672) 2024-12-06T08:54:33,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741844_1020 (size=5672) 2024-12-06T08:54:33,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741844_1020 (size=5672) 2024-12-06T08:54:33,180 INFO [M:0;25494438c68b:43489 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/998a407b47bc46f79f17db8ec5d1c01d 2024-12-06T08:54:33,207 DEBUG [M:0;25494438c68b:43489 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e585760733b4b218cf2f3520c6f63b1 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733475272322/Put/seqid=0 2024-12-06T08:54:33,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:33,216 INFO [RS:0;25494438c68b:38179 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T08:54:33,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38179-0x100668939470001, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:33,216 INFO [RS:0;25494438c68b:38179 {}] regionserver.HRegionServer(1031): Exiting; stopping=25494438c68b,38179,1733475270822; zookeeper connection closed. 
2024-12-06T08:54:33,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741845_1021 (size=6438) 2024-12-06T08:54:33,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741845_1021 (size=6438) 2024-12-06T08:54:33,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741845_1021 (size=6438) 2024-12-06T08:54:33,217 INFO [M:0;25494438c68b:43489 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e585760733b4b218cf2f3520c6f63b1 2024-12-06T08:54:33,218 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3100812d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3100812d 2024-12-06T08:54:33,218 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-06T08:54:33,249 DEBUG [M:0;25494438c68b:43489 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b64a49b1daad4781a5f54bf1383e4065 is 69, key is 25494438c68b,32887,1733475270882/rs:state/1733475271227/Put/seqid=0 2024-12-06T08:54:33,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741846_1022 (size=5294) 2024-12-06T08:54:33,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741846_1022 (size=5294) 2024-12-06T08:54:33,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741846_1022 (size=5294) 2024-12-06T08:54:33,258 INFO [M:0;25494438c68b:43489 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b64a49b1daad4781a5f54bf1383e4065 2024-12-06T08:54:33,267 DEBUG [M:0;25494438c68b:43489 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/998a407b47bc46f79f17db8ec5d1c01d as hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/998a407b47bc46f79f17db8ec5d1c01d 2024-12-06T08:54:33,275 INFO [M:0;25494438c68b:43489 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/998a407b47bc46f79f17db8ec5d1c01d, entries=8, sequenceid=72, filesize=5.5 K 2024-12-06T08:54:33,276 DEBUG [M:0;25494438c68b:43489 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e585760733b4b218cf2f3520c6f63b1 as hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5e585760733b4b218cf2f3520c6f63b1 2024-12-06T08:54:33,284 INFO [M:0;25494438c68b:43489 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5e585760733b4b218cf2f3520c6f63b1, entries=8, sequenceid=72, filesize=6.3 K 2024-12-06T08:54:33,285 DEBUG [M:0;25494438c68b:43489 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b64a49b1daad4781a5f54bf1383e4065 as hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b64a49b1daad4781a5f54bf1383e4065 2024-12-06T08:54:33,293 INFO [M:0;25494438c68b:43489 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45531/user/jenkins/test-data/82bbc703-c541-e4ea-52c7-ef23c1e6fd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b64a49b1daad4781a5f54bf1383e4065, entries=3, sequenceid=72, filesize=5.2 K 2024-12-06T08:54:33,295 INFO [M:0;25494438c68b:43489 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=72, compaction requested=false 2024-12-06T08:54:33,296 INFO [M:0;25494438c68b:43489 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:54:33,296 DEBUG [M:0;25494438c68b:43489 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733475273141Disabling compacts and flushes for region at 1733475273141Disabling writes for close at 1733475273141Obtaining lock to block concurrent updates at 1733475273141Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733475273141Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733475273142 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733475273143 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733475273143Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733475273162 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733475273162Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733475273189 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733475273207 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733475273207Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733475273225 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733475273248 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733475273248Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ac31f03: reopening flushed file at 1733475273266 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6582e519: reopening flushed file at 1733475273275 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42416591: reopening flushed file at 1733475273284 (+9 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=72, compaction requested=false at 1733475273295 (+11 ms)Writing region close event to WAL at 1733475273296 (+1 ms)Closed at 1733475273296 2024-12-06T08:54:33,297 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,297 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,297 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,298 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,298 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T08:54:33,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41047 is added to blk_1073741830_1006 (size=32662) 2024-12-06T08:54:33,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741830_1006 (size=32662) 2024-12-06T08:54:33,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41243 is added to blk_1073741830_1006 (size=32662) 2024-12-06T08:54:33,302 INFO [M:0;25494438c68b:43489 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-06T08:54:33,302 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-06T08:54:33,302 INFO [M:0;25494438c68b:43489 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43489 2024-12-06T08:54:33,302 INFO [M:0;25494438c68b:43489 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T08:54:33,405 INFO [M:0;25494438c68b:43489 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T08:54:33,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:33,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43489-0x100668939470000, quorum=127.0.0.1:59435, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:54:33,407 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3939dfd7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:54:33,408 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6419fd60{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:54:33,408 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:54:33,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d6974a6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:54:33,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@611fd8d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/hadoop.log.dir/,STOPPED} 2024-12-06T08:54:33,409 WARN [BP-1653138612-172.17.0.2-1733475269774 heartbeating to localhost/127.0.0.1:45531 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:54:33,409 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:54:33,410 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:54:33,410 WARN [BP-1653138612-172.17.0.2-1733475269774 heartbeating to localhost/127.0.0.1:45531 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1653138612-172.17.0.2-1733475269774 (Datanode Uuid 6193c713-3c98-4ec7-9ea7-444dca86cfba) service to localhost/127.0.0.1:45531 2024-12-06T08:54:33,410 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/data/data5/current/BP-1653138612-172.17.0.2-1733475269774 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:54:33,411 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/data/data6/current/BP-1653138612-172.17.0.2-1733475269774 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:54:33,411 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:54:33,413 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65e525b7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:54:33,413 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4bd07676{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:54:33,413 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:54:33,413 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54390a13{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:54:33,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6de03e39{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/hadoop.log.dir/,STOPPED} 2024-12-06T08:54:33,415 WARN [BP-1653138612-172.17.0.2-1733475269774 heartbeating to localhost/127.0.0.1:45531 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:54:33,415 WARN [BP-1653138612-172.17.0.2-1733475269774 heartbeating to localhost/127.0.0.1:45531 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1653138612-172.17.0.2-1733475269774 (Datanode Uuid 719839d2-1818-4496-8441-cf87aedcce6a) service to localhost/127.0.0.1:45531 2024-12-06T08:54:33,415 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:54:33,415 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:54:33,416 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/data/data3/current/BP-1653138612-172.17.0.2-1733475269774 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:54:33,416 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/data/data4/current/BP-1653138612-172.17.0.2-1733475269774 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:54:33,416 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:54:33,418 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1aa34083{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:54:33,419 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@234ffd1d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:54:33,419 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:54:33,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bfb2a2f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:54:33,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ef18cbe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/hadoop.log.dir/,STOPPED} 2024-12-06T08:54:33,420 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:54:33,420 WARN [BP-1653138612-172.17.0.2-1733475269774 heartbeating to localhost/127.0.0.1:45531 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:54:33,420 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:54:33,421 WARN [BP-1653138612-172.17.0.2-1733475269774 heartbeating to localhost/127.0.0.1:45531 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1653138612-172.17.0.2-1733475269774 (Datanode Uuid c559ffaf-7d4b-4bb2-85de-a3eadac2f48f) service to localhost/127.0.0.1:45531 2024-12-06T08:54:33,421 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/data/data1/current/BP-1653138612-172.17.0.2-1733475269774 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:54:33,422 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/cluster_16955451-ced9-75a0-5e67-0ca948b704b1/data/data2/current/BP-1653138612-172.17.0.2-1733475269774 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:54:33,422 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:54:33,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d637fa1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:54:33,430 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6684a7bb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:54:33,430 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:54:33,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@117b7671{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:54:33,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1921d73d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/549b6d72-8ca8-2512-d8fa-e29199bcd171/hadoop.log.dir/,STOPPED} 2024-12-06T08:54:33,441 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-06T08:54:33,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-06T08:54:33,476 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=150 (was 91) - Thread LEAK? -, OpenFileDescriptor=526 (was 443) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=214 (was 215), ProcessCount=11 (was 11), AvailableMemoryMB=8605 (was 8782)