2024-12-07 17:45:59,497 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-07 17:45:59,508 main DEBUG Took 0.009264 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-07 17:45:59,509 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-07 17:45:59,509 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-07 17:45:59,510 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-07 17:45:59,511 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:45:59,525 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-07 17:45:59,535 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,536 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:45:59,537 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,537 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:45:59,538 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,538 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:45:59,538 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,539 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:45:59,539 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,539 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:45:59,540 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,540 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:45:59,541 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,541 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-07 17:45:59,542 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,542 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:45:59,542 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,543 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:45:59,543 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,543 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:45:59,544 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,544 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:45:59,544 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,544 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:45:59,545 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,545 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-07 17:45:59,546 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:45:59,547 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-07 17:45:59,549 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-07 17:45:59,549 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-07 17:45:59,551 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-07 17:45:59,551 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-07 17:45:59,560 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-07 17:45:59,562 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-07 17:45:59,564 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-07 17:45:59,564 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-07 17:45:59,565 main DEBUG createAppenders(={Console}) 2024-12-07 17:45:59,566 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-07 17:45:59,566 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-07 17:45:59,566 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-07 17:45:59,567 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-07 17:45:59,567 main DEBUG OutputStream closed 2024-12-07 17:45:59,567 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-07 17:45:59,567 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-07 17:45:59,568 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-07 17:45:59,630 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-07 17:45:59,632 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-07 17:45:59,633 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-07 17:45:59,635 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-07 17:45:59,635 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-07 17:45:59,635 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-07 17:45:59,636 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-07 17:45:59,636 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-07 17:45:59,636 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-07 17:45:59,637 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-07 17:45:59,637 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-07 17:45:59,637 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-07 17:45:59,637 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-07 17:45:59,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-07 17:45:59,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-07 17:45:59,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-07 17:45:59,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-07 17:45:59,639 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-07 17:45:59,641 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-07 17:45:59,641 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-07 17:45:59,642 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-07 17:45:59,642 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-07T17:45:59,655 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-07 17:45:59,658 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-07 17:45:59,658 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-07T17:45:59,877 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49 2024-12-07T17:45:59,900 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54, deleteOnExit=true 2024-12-07T17:45:59,901 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/test.cache.data in system properties and HBase conf 2024-12-07T17:45:59,902 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T17:45:59,902 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/hadoop.log.dir in system properties and HBase conf 2024-12-07T17:45:59,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T17:45:59,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T17:45:59,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T17:45:59,987 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-07T17:46:00,070 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T17:46:00,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T17:46:00,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T17:46:00,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T17:46:00,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T17:46:00,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T17:46:00,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T17:46:00,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T17:46:00,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T17:46:00,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T17:46:00,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/nfs.dump.dir in system properties and HBase conf 2024-12-07T17:46:00,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/java.io.tmpdir in system properties and HBase conf 2024-12-07T17:46:00,077 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T17:46:00,077 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T17:46:00,077 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T17:46:00,974 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-07T17:46:01,034 INFO [Time-limited test {}] log.Log(170): Logging initialized @2105ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-07T17:46:01,095 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:46:01,145 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:46:01,164 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:46:01,164 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:46:01,165 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T17:46:01,177 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:46:01,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:46:01,181 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T17:46:01,375 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/java.io.tmpdir/jetty-localhost-38603-hadoop-hdfs-3_4_1-tests_jar-_-any-16309805263403418064/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T17:46:01,386 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:38603} 2024-12-07T17:46:01,387 INFO [Time-limited test {}] server.Server(415): Started @2458ms 2024-12-07T17:46:01,889 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:46:01,896 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:46:01,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:46:01,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:46:01,898 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T17:46:01,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:46:01,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T17:46:01,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/java.io.tmpdir/jetty-localhost-37059-hadoop-hdfs-3_4_1-tests_jar-_-any-14177727607520484016/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:46:01,996 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:37059} 2024-12-07T17:46:01,997 INFO [Time-limited test {}] server.Server(415): Started @3068ms 2024-12-07T17:46:02,043 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T17:46:02,141 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:46:02,145 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:46:02,147 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:46:02,147 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:46:02,147 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T17:46:02,149 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:46:02,149 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T17:46:02,245 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/java.io.tmpdir/jetty-localhost-33357-hadoop-hdfs-3_4_1-tests_jar-_-any-7744737700869855340/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:46:02,246 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:33357} 2024-12-07T17:46:02,247 INFO [Time-limited test {}] server.Server(415): Started @3318ms 2024-12-07T17:46:02,249 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T17:46:02,283 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:46:02,287 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:46:02,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:46:02,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:46:02,291 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T17:46:02,291 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:46:02,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T17:46:02,385 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/java.io.tmpdir/jetty-localhost-39269-hadoop-hdfs-3_4_1-tests_jar-_-any-16493968580696993734/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:46:02,386 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:39269} 2024-12-07T17:46:02,386 INFO [Time-limited test {}] server.Server(415): Started @3457ms 2024-12-07T17:46:02,388 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-07T17:46:03,321 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/data/data4/current/BP-1730434801-172.17.0.2-1733593560554/current, will proceed with Du for space computation calculation, 2024-12-07T17:46:03,321 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/data/data1/current/BP-1730434801-172.17.0.2-1733593560554/current, will proceed with Du for space computation calculation, 2024-12-07T17:46:03,321 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/data/data3/current/BP-1730434801-172.17.0.2-1733593560554/current, will proceed with Du for space computation calculation, 2024-12-07T17:46:03,321 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/data/data2/current/BP-1730434801-172.17.0.2-1733593560554/current, will proceed with Du for space computation calculation, 2024-12-07T17:46:03,354 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T17:46:03,354 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T17:46:03,368 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/data/data5/current/BP-1730434801-172.17.0.2-1733593560554/current, will proceed with Du for space computation calculation, 2024-12-07T17:46:03,368 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/data/data6/current/BP-1730434801-172.17.0.2-1733593560554/current, will proceed with Du for space computation calculation, 2024-12-07T17:46:03,390 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T17:46:03,401 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb17def5486cb947f with lease ID 0xed41a6c2deb6f3d1: Processing first storage report for DS-3843ebc5-805b-4b6e-a742-a831020a81a9 from datanode DatanodeRegistration(127.0.0.1:38303, datanodeUuid=115d20bd-56af-4935-b69b-458a196ec151, infoPort=43253, infoSecurePort=0, ipcPort=39497, storageInfo=lv=-57;cid=testClusterID;nsid=819680346;c=1733593560554) 2024-12-07T17:46:03,402 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb17def5486cb947f with lease ID 0xed41a6c2deb6f3d1: from storage DS-3843ebc5-805b-4b6e-a742-a831020a81a9 node DatanodeRegistration(127.0.0.1:38303, datanodeUuid=115d20bd-56af-4935-b69b-458a196ec151, infoPort=43253, infoSecurePort=0, ipcPort=39497, storageInfo=lv=-57;cid=testClusterID;nsid=819680346;c=1733593560554), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T17:46:03,402 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x538df2cfaf1b486d with lease ID 0xed41a6c2deb6f3cf: Processing first storage report for DS-4c651403-0178-49b1-8770-385cd4b250ca from datanode DatanodeRegistration(127.0.0.1:38991, datanodeUuid=b159a9a8-a08c-46b8-a779-5f00ed87531a, infoPort=40873, infoSecurePort=0, ipcPort=39397, storageInfo=lv=-57;cid=testClusterID;nsid=819680346;c=1733593560554) 2024-12-07T17:46:03,402 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x538df2cfaf1b486d with lease ID 0xed41a6c2deb6f3cf: from storage DS-4c651403-0178-49b1-8770-385cd4b250ca node DatanodeRegistration(127.0.0.1:38991, datanodeUuid=b159a9a8-a08c-46b8-a779-5f00ed87531a, infoPort=40873, infoSecurePort=0, ipcPort=39397, storageInfo=lv=-57;cid=testClusterID;nsid=819680346;c=1733593560554), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T17:46:03,402 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7980e4b88a580156 with lease ID 0xed41a6c2deb6f3d0: Processing first storage report for DS-deaa7d0e-da30-4026-b202-38064c06578f from datanode DatanodeRegistration(127.0.0.1:41011, datanodeUuid=542f0800-17a5-447d-b3c2-acd6b34d20d7, infoPort=34253, infoSecurePort=0, ipcPort=35025, storageInfo=lv=-57;cid=testClusterID;nsid=819680346;c=1733593560554) 2024-12-07T17:46:03,402 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7980e4b88a580156 with lease ID 0xed41a6c2deb6f3d0: from storage DS-deaa7d0e-da30-4026-b202-38064c06578f node DatanodeRegistration(127.0.0.1:41011, datanodeUuid=542f0800-17a5-447d-b3c2-acd6b34d20d7, infoPort=34253, infoSecurePort=0, ipcPort=35025, storageInfo=lv=-57;cid=testClusterID;nsid=819680346;c=1733593560554), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T17:46:03,403 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb17def5486cb947f with lease ID 0xed41a6c2deb6f3d1: Processing first storage report for DS-3c2ed788-9059-4edc-870c-e6242eefb3b2 from datanode DatanodeRegistration(127.0.0.1:38303, datanodeUuid=115d20bd-56af-4935-b69b-458a196ec151, infoPort=43253, infoSecurePort=0, ipcPort=39497, storageInfo=lv=-57;cid=testClusterID;nsid=819680346;c=1733593560554) 2024-12-07T17:46:03,403 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xb17def5486cb947f with lease ID 0xed41a6c2deb6f3d1: from storage DS-3c2ed788-9059-4edc-870c-e6242eefb3b2 node DatanodeRegistration(127.0.0.1:38303, datanodeUuid=115d20bd-56af-4935-b69b-458a196ec151, infoPort=43253, infoSecurePort=0, ipcPort=39497, storageInfo=lv=-57;cid=testClusterID;nsid=819680346;c=1733593560554), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T17:46:03,403 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x538df2cfaf1b486d with lease ID 0xed41a6c2deb6f3cf: Processing first storage report for DS-3d14c573-c148-434f-bf91-8c2cb739ec67 from datanode DatanodeRegistration(127.0.0.1:38991, datanodeUuid=b159a9a8-a08c-46b8-a779-5f00ed87531a, infoPort=40873, infoSecurePort=0, ipcPort=39397, storageInfo=lv=-57;cid=testClusterID;nsid=819680346;c=1733593560554) 2024-12-07T17:46:03,403 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x538df2cfaf1b486d with lease ID 0xed41a6c2deb6f3cf: from storage DS-3d14c573-c148-434f-bf91-8c2cb739ec67 node DatanodeRegistration(127.0.0.1:38991, datanodeUuid=b159a9a8-a08c-46b8-a779-5f00ed87531a, infoPort=40873, infoSecurePort=0, ipcPort=39397, storageInfo=lv=-57;cid=testClusterID;nsid=819680346;c=1733593560554), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T17:46:03,403 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7980e4b88a580156 with lease ID 0xed41a6c2deb6f3d0: Processing first storage report for DS-70ecc333-1cc1-4994-9593-f8fae24de2ac from datanode DatanodeRegistration(127.0.0.1:41011, datanodeUuid=542f0800-17a5-447d-b3c2-acd6b34d20d7, infoPort=34253, infoSecurePort=0, ipcPort=35025, storageInfo=lv=-57;cid=testClusterID;nsid=819680346;c=1733593560554) 2024-12-07T17:46:03,403 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7980e4b88a580156 with lease ID 0xed41a6c2deb6f3d0: from storage DS-70ecc333-1cc1-4994-9593-f8fae24de2ac node DatanodeRegistration(127.0.0.1:41011, datanodeUuid=542f0800-17a5-447d-b3c2-acd6b34d20d7, infoPort=34253, infoSecurePort=0, ipcPort=35025, storageInfo=lv=-57;cid=testClusterID;nsid=819680346;c=1733593560554), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T17:46:03,468 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49 2024-12-07T17:46:03,538 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-07T17:46:03,584 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=161, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=158, ProcessCount=11, AvailableMemoryMB=20419 2024-12-07T17:46:03,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T17:46:03,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-07T17:46:03,683 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/zookeeper_0, clientPort=60863, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T17:46:03,692 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60863 2024-12-07T17:46:03,710 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:03,712 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:03,793 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:03,793 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:03,830 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-941362637_22 at /127.0.0.1:50954 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:41011:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50954 dst: /127.0.0.1:41011 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:03,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41011 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-07T17:46:04,250 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T17:46:04,261 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf with version=8 2024-12-07T17:46:04,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/hbase-staging 2024-12-07T17:46:04,337 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-07T17:46:04,547 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bd53b59592b3:0 server-side Connection retries=45 2024-12-07T17:46:04,555 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:04,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:04,560 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T17:46:04,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:04,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T17:46:04,668 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T17:46:04,716 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-07T17:46:04,723 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-07T17:46:04,726 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T17:46:04,747 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 19667 (auto-detected) 2024-12-07T17:46:04,747 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-07T17:46:04,764 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35933 2024-12-07T17:46:04,782 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35933 connecting to ZooKeeper ensemble=127.0.0.1:60863 2024-12-07T17:46:04,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:359330x0, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T17:46:04,872 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35933-0x100017040af0000 connected 2024-12-07T17:46:04,948 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:04,950 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:04,959 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:04,962 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf, hbase.cluster.distributed=false 2024-12-07T17:46:04,982 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T17:46:04,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35933 2024-12-07T17:46:04,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35933 2024-12-07T17:46:04,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35933 2024-12-07T17:46:04,988 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35933 2024-12-07T17:46:04,989 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35933 2024-12-07T17:46:05,074 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bd53b59592b3:0 server-side Connection retries=45 2024-12-07T17:46:05,076 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:05,076 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:05,076 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T17:46:05,076 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:05,076 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T17:46:05,078 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T17:46:05,080 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T17:46:05,081 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36517 2024-12-07T17:46:05,083 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36517 connecting to ZooKeeper ensemble=127.0.0.1:60863 2024-12-07T17:46:05,084 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:05,088 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:05,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:365170x0, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T17:46:05,112 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:05,112 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36517-0x100017040af0001 connected 2024-12-07T17:46:05,117 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T17:46:05,125 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T17:46:05,128 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T17:46:05,133 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T17:46:05,134 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36517 2024-12-07T17:46:05,134 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=36517 2024-12-07T17:46:05,135 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36517 2024-12-07T17:46:05,135 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36517 2024-12-07T17:46:05,136 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36517 2024-12-07T17:46:05,150 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bd53b59592b3:0 server-side Connection retries=45 2024-12-07T17:46:05,150 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:05,150 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:05,151 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T17:46:05,151 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:05,151 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T17:46:05,151 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T17:46:05,151 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T17:46:05,152 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44343 2024-12-07T17:46:05,153 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44343 connecting to ZooKeeper ensemble=127.0.0.1:60863 2024-12-07T17:46:05,155 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:05,157 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:05,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:443430x0, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T17:46:05,196 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44343-0x100017040af0002 connected 2024-12-07T17:46:05,196 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:05,197 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-12-07T17:46:05,199 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T17:46:05,201 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T17:46:05,204 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T17:46:05,205 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44343 2024-12-07T17:46:05,205 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44343 2024-12-07T17:46:05,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44343 2024-12-07T17:46:05,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44343 2024-12-07T17:46:05,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44343 2024-12-07T17:46:05,227 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bd53b59592b3:0 server-side Connection retries=45 2024-12-07T17:46:05,227 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:05,227 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:05,227 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T17:46:05,228 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:05,228 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T17:46:05,228 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T17:46:05,228 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T17:46:05,229 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38965 2024-12-07T17:46:05,230 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38965 connecting to ZooKeeper ensemble=127.0.0.1:60863 2024-12-07T17:46:05,231 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:05,233 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:05,263 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:389650x0, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T17:46:05,264 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:05,264 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38965-0x100017040af0003 connected 2024-12-07T17:46:05,264 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T17:46:05,265 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T17:46:05,266 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T17:46:05,268 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T17:46:05,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38965 2024-12-07T17:46:05,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38965 2024-12-07T17:46:05,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38965 2024-12-07T17:46:05,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38965 2024-12-07T17:46:05,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38965 2024-12-07T17:46:05,285 DEBUG [M:0;bd53b59592b3:35933 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bd53b59592b3:35933 2024-12-07T17:46:05,286 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bd53b59592b3,35933,1733593564405 2024-12-07T17:46:05,296 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:05,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:05,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
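The RpcExecutor records above (handlerCount, numCallQueues, maxQueueLength, and the priority.RWQ read/write split) are driven by ordinary HBase RPC settings rather than anything test-specific. The following is a minimal sketch, assuming a stock HBaseConfiguration, of the keys that commonly feed those numbers; the exact logged values (for example maxQueueLength=30) also depend on HBase-internal defaults, so this is illustrative only and not the test's actual configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcQueueTuningSketch {
        // Returns a Configuration carrying the RPC knobs related to the executor
        // setup logged above. Values mirror the log; they are not recommendations.
        public static Configuration tuned() {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.regionserver.handler.count", 3);              // total RPC handlers per server
            conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);    // enables the read/write queue split
            conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.1f); // call queues per handler
            return conf;
        }
    }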
2024-12-07T17:46:05,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:05,299 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bd53b59592b3,35933,1733593564405 2024-12-07T17:46:05,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T17:46:05,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:05,328 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T17:46:05,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T17:46:05,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:05,328 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:05,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:05,329 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T17:46:05,330 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bd53b59592b3,35933,1733593564405 from backup master directory 2024-12-07T17:46:05,338 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:05,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:05,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/backup-masters/bd53b59592b3,35933,1733593564405 2024-12-07T17:46:05,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:05,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:05,339 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T17:46:05,340 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bd53b59592b3,35933,1733593564405 2024-12-07T17:46:05,342 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-07T17:46:05,343 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-07T17:46:05,399 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/hbase.id] with ID: 7af12544-0327-47ed-8209-dd2204f6e024 2024-12-07T17:46:05,399 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/.tmp/hbase.id 2024-12-07T17:46:05,406 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:05,406 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:05,413 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-941362637_22 at /127.0.0.1:54782 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:38991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54782 dst: /127.0.0.1:38991 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:05,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-07T17:46:05,421 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T17:46:05,421 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/.tmp/hbase.id]:[hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/hbase.id] 2024-12-07T17:46:05,463 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:05,467 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T17:46:05,483 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-07T17:46:05,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:05,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:05,503 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:05,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:05,517 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:05,517 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:05,520 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-941362637_22 at /127.0.0.1:54808 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:38991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54808 dst: /127.0.0.1:38991 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:05,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-07T17:46:05,527 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
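The repeated "Cannot allocate parity block" warnings, and the DataXceiver "Premature EOF" errors that accompany them, appear to stem from the RS-3-2-1024k erasure coding policy: it needs at least five datanodes (3 data + 2 parity), while only three datanodes show up in this log. Besides the 'hdfs ec -verifyClusterSetup' command that the warning itself suggests, here is a hedged sketch of inspecting (and, if desired, unsetting) the policy on a directory through the Hadoop 3 DistributedFileSystem API; the filesystem URI is copied from the log and the directory path is hypothetical.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyCheckSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // NameNode URI taken from the log; the directory below is a hypothetical test path.
            try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:35255"), conf)) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;
                Path dir = new Path("/user/jenkins/test-data");
                ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
                System.out.println("EC policy on " + dir + ": "
                    + (policy == null ? "none (plain replication)" : policy.getName()));
                // On a cluster with fewer datanodes than the policy's stripe width,
                // falling back to replication avoids the parity-allocation warnings.
                dfs.unsetErasureCodingPolicy(dir);
            }
        }
    }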
2024-12-07T17:46:05,540 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:46:05,542 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T17:46:05,547 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T17:46:05,572 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:05,572 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:05,575 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-941362637_22 at /127.0.0.1:54820 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:38991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54820 dst: /127.0.0.1:38991 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:05,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-07T17:46:05,580 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T17:46:05,595 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store 2024-12-07T17:46:05,611 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:05,612 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:05,615 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-941362637_22 at /127.0.0.1:50982 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41011:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50982 dst: /127.0.0.1:41011 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:05,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41011 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-07T17:46:05,621 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T17:46:05,625 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-07T17:46:05,629 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:46:05,630 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T17:46:05,630 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:05,631 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:05,633 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-07T17:46:05,633 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:05,633 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:05,634 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733593565630Disabling compacts and flushes for region at 1733593565630Disabling writes for close at 1733593565633 (+3 ms)Writing region close event to WAL at 1733593565633Closed at 1733593565633 2024-12-07T17:46:05,636 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/.initializing 2024-12-07T17:46:05,637 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/WALs/bd53b59592b3,35933,1733593564405 2024-12-07T17:46:05,645 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T17:46:05,660 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bd53b59592b3%2C35933%2C1733593564405, suffix=, logDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/WALs/bd53b59592b3,35933,1733593564405, archiveDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/oldWALs, maxLogs=10 2024-12-07T17:46:05,686 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/WALs/bd53b59592b3,35933,1733593564405/bd53b59592b3%2C35933%2C1733593564405.1733593565664, exclude list is [], retry=0 2024-12-07T17:46:05,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:05,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41011,DS-deaa7d0e-da30-4026-b202-38064c06578f,DISK] 2024-12-07T17:46:05,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38991,DS-4c651403-0178-49b1-8770-385cd4b250ca,DISK] 2024-12-07T17:46:05,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38303,DS-3843ebc5-805b-4b6e-a742-a831020a81a9,DISK] 2024-12-07T17:46:05,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-07T17:46:05,746 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/WALs/bd53b59592b3,35933,1733593564405/bd53b59592b3%2C35933%2C1733593564405.1733593565664 2024-12-07T17:46:05,747 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34253:34253),(127.0.0.1/127.0.0.1:43253:43253),(127.0.0.1/127.0.0.1:40873:40873)] 2024-12-07T17:46:05,748 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T17:46:05,748 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:46:05,751 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:05,751 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:05,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:05,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T17:46:05,806 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:05,809 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:05,809 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:05,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T17:46:05,813 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:05,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:46:05,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:05,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T17:46:05,818 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:05,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:46:05,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:05,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T17:46:05,823 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:05,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:46:05,824 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:05,829 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:05,830 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:05,835 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:05,835 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:05,838 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T17:46:05,841 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:05,847 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:46:05,849 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75262017, jitterRate=0.1214914470911026}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T17:46:05,857 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733593565762Initializing all the Stores at 1733593565764 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593565765 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733593565765Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733593565765Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733593565766 (+1 ms)Cleaning up temporary data from old regions at 1733593565835 (+69 ms)Region opened successfully at 1733593565857 (+22 ms) 2024-12-07T17:46:05,858 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T17:46:05,895 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9334027, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bd53b59592b3/172.17.0.2:0 2024-12-07T17:46:05,925 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T17:46:05,934 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T17:46:05,934 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T17:46:05,936 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T17:46:05,938 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-07T17:46:05,944 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-07T17:46:05,944 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T17:46:05,970 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T17:46:05,978 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T17:46:06,027 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T17:46:06,031 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T17:46:06,033 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T17:46:06,044 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T17:46:06,046 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T17:46:06,049 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T17:46:06,054 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T17:46:06,056 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T17:46:06,063 DEBUG [master/bd53b59592b3:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T17:46:06,083 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T17:46:06,094 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T17:46:06,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:06,104 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:06,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:06,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:06,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,105 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,109 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bd53b59592b3,35933,1733593564405, sessionid=0x100017040af0000, setting cluster-up flag (Was=false) 2024-12-07T17:46:06,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,138 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
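Most of the DEBUG traffic in this stretch is the master and the three region servers (re-)registering ZooKeeper watches as znodes such as /hbase/master, /hbase/backup-masters and /hbase/running are created and deleted. As a minimal sketch, using the plain Apache ZooKeeper client API and the quorum string from the log, this is what "Set watcher on znode that does not yet exist" amounts to; everything beyond the connect string and the znode path is illustrative.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class RunningZnodeWatchSketch {
        public static void main(String[] args) throws Exception {
            CountDownLatch created = new CountDownLatch(1);
            Watcher watcher = (WatchedEvent e) -> {
                // Fires for NodeCreated/NodeDeleted/NodeChildrenChanged, mirroring the events in the log.
                if (e.getType() == Watcher.Event.EventType.NodeCreated) {
                    created.countDown();
                }
            };
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60863", 30000, watcher);
            // exists() registers the watch even when the znode is not there yet,
            // which is exactly the "watcher on znode that does not yet exist" case.
            Stat stat = zk.exists("/hbase/running", watcher);
            if (stat == null) {
                created.await();   // block until the active master creates /hbase/running
            }
            zk.close();
        }
    }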
2024-12-07T17:46:06,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,163 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T17:46:06,166 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bd53b59592b3,35933,1733593564405 2024-12-07T17:46:06,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,188 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,213 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T17:46:06,215 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bd53b59592b3,35933,1733593564405 2024-12-07T17:46:06,224 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T17:46:06,274 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(746): ClusterId : 7af12544-0327-47ed-8209-dd2204f6e024 2024-12-07T17:46:06,274 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(746): ClusterId : 7af12544-0327-47ed-8209-dd2204f6e024 2024-12-07T17:46:06,274 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.HRegionServer(746): ClusterId : 7af12544-0327-47ed-8209-dd2204f6e024 2024-12-07T17:46:06,276 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T17:46:06,276 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T17:46:06,276 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T17:46:06,289 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T17:46:06,298 INFO 
[master/bd53b59592b3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T17:46:06,304 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T17:46:06,308 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bd53b59592b3,35933,1733593564405 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T17:46:06,323 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T17:46:06,323 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T17:46:06,323 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T17:46:06,323 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T17:46:06,323 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T17:46:06,323 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T17:46:06,323 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bd53b59592b3:0, corePoolSize=5, maxPoolSize=5 2024-12-07T17:46:06,324 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bd53b59592b3:0, corePoolSize=5, maxPoolSize=5 2024-12-07T17:46:06,324 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bd53b59592b3:0, corePoolSize=5, maxPoolSize=5 2024-12-07T17:46:06,324 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bd53b59592b3:0, corePoolSize=5, maxPoolSize=5 2024-12-07T17:46:06,324 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bd53b59592b3:0, corePoolSize=10, maxPoolSize=10 2024-12-07T17:46:06,324 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,324 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bd53b59592b3:0, corePoolSize=2, maxPoolSize=2 
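The "Loaded config" entry at the top of this block shows the stochastic balancer's effective knobs (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000, runMaxSteps=false). A hedged sketch of tuning the same knobs programmatically before starting a cluster; the property keys follow the balancer's documented hbase.master.balancer.stochastic.* naming and should be checked against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static Configuration tunedConf() {
            Configuration conf = HBaseConfiguration.create();
            // Keys assumed from the stochastic balancer's documented configuration;
            // the values mirror the figures reported in the log line above.
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            return conf;
        }
    }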
2024-12-07T17:46:06,324 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,325 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733593596325 2024-12-07T17:46:06,327 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T17:46:06,328 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T17:46:06,329 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T17:46:06,330 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T17:46:06,332 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T17:46:06,332 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T17:46:06,333 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T17:46:06,333 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T17:46:06,333 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-07T17:46:06,335 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:06,335 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T17:46:06,336 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T17:46:06,337 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T17:46:06,337 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T17:46:06,339 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T17:46:06,339 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T17:46:06,339 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T17:46:06,339 DEBUG [RS:0;bd53b59592b3:36517 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@525c7dc8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bd53b59592b3/172.17.0.2:0 2024-12-07T17:46:06,339 DEBUG [RS:2;bd53b59592b3:38965 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69c24cac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bd53b59592b3/172.17.0.2:0 2024-12-07T17:46:06,339 DEBUG [RS:1;bd53b59592b3:44343 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c6354a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bd53b59592b3/172.17.0.2:0 2024-12-07T17:46:06,339 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T17:46:06,340 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T17:46:06,342 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:06,342 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:06,343 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bd53b59592b3:0:becomeActiveMaster-HFileCleaner.large.0-1733593566341,5,FailOnTimeoutGroup] 2024-12-07T17:46:06,348 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bd53b59592b3:0:becomeActiveMaster-HFileCleaner.small.0-1733593566344,5,FailOnTimeoutGroup] 2024-12-07T17:46:06,348 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,348 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T17:46:06,349 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,350 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-07T17:46:06,353 DEBUG [RS:2;bd53b59592b3:38965 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;bd53b59592b3:38965 2024-12-07T17:46:06,353 DEBUG [RS:1;bd53b59592b3:44343 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;bd53b59592b3:44343 2024-12-07T17:46:06,354 DEBUG [RS:0;bd53b59592b3:36517 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bd53b59592b3:36517 2024-12-07T17:46:06,356 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T17:46:06,356 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T17:46:06,356 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T17:46:06,356 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T17:46:06,356 DEBUG [RS:2;bd53b59592b3:38965 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T17:46:06,356 DEBUG [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T17:46:06,357 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T17:46:06,357 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T17:46:06,357 DEBUG [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T17:46:06,357 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-941362637_22 at /127.0.0.1:50996 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:41011:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50996 dst: /127.0.0.1:41011 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T17:46:06,359 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.HRegionServer(2659): reportForDuty to master=bd53b59592b3,35933,1733593564405 with port=38965, startcode=1733593565227 2024-12-07T17:46:06,359 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(2659): reportForDuty to master=bd53b59592b3,35933,1733593564405 with port=36517, startcode=1733593565044 2024-12-07T17:46:06,359 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(2659): reportForDuty to master=bd53b59592b3,35933,1733593564405 with port=44343, startcode=1733593565149 2024-12-07T17:46:06,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41011 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-07T17:46:06,368 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T17:46:06,370 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T17:46:06,370 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf 2024-12-07T17:46:06,373 DEBUG [RS:2;bd53b59592b3:38965 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T17:46:06,373 DEBUG [RS:1;bd53b59592b3:44343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T17:46:06,373 DEBUG [RS:0;bd53b59592b3:36517 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T17:46:06,377 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:06,377 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:06,386 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-941362637_22 at /127.0.0.1:51016 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:41011:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51016 dst: /127.0.0.1:41011 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:06,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41011 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-07T17:46:06,396 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
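The repeated "Cannot allocate parity block ... policy=RS-3-2-1024k" warnings come from writing into a directory carrying a striped erasure-coding policy on a cluster with fewer datanodes than the policy's data+parity width (3+2). The log itself points at 'hdfs ec -verifyClusterSetup'; below is a minimal Java sketch of inspecting, and if necessary removing, the policy on the affected directory via the DistributedFileSystem API. The path is illustrative:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyCheckSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // The test above writes under hdfs://localhost:35255/user/jenkins/test-data/...
            DistributedFileSystem dfs = (DistributedFileSystem)
                DistributedFileSystem.get(URI.create("hdfs://localhost:35255"), conf);
            Path dir = new Path("/user/jenkins/test-data");   // illustrative path
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
            System.out.println("EC policy on " + dir + ": "
                    + (policy == null ? "replication" : policy.getName()));
            if (policy != null) {
                // Falling back to plain replication avoids the RS-3-2-1024k parity warnings
                // when fewer than 5 datanodes (3 data + 2 parity) are available.
                dfs.unsetErasureCodingPolicy(dir);
            }
            dfs.close();
        }
    }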
2024-12-07T17:46:06,398 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:46:06,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T17:46:06,409 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T17:46:06,409 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:06,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:06,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T17:46:06,414 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T17:46:06,414 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:06,415 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41679, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T17:46:06,415 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34283, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 
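The cacheConfig and CompactionConfiguration entries above echo the per-family settings of hbase:meta (ROWCOL bloom filters, ROW_INDEX_V1 encoding, in-memory, 8 KB blocks, 3 versions). A hedged sketch of declaring a column family with the same attributes through the public descriptor builders; the table and family names are illustrative, not part of this run:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamilySketch {
        public static TableDescriptor build() {
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setInMemory(true)
                .setBlocksize(8 * 1024)   // 8192 B, as logged for the info family
                .build();
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example"))   // illustrative table name
                .setColumnFamily(info)
                .build();
        }
    }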
2024-12-07T17:46:06,415 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37797, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T17:46:06,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:06,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T17:46:06,421 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35933 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bd53b59592b3,38965,1733593565227 2024-12-07T17:46:06,424 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35933 {}] master.ServerManager(517): Registering regionserver=bd53b59592b3,38965,1733593565227 2024-12-07T17:46:06,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-07T17:46:06,426 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T17:46:06,426 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:06,427 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:06,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T17:46:06,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-07T17:46:06,431 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T17:46:06,432 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:06,433 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:06,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T17:46:06,435 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740 2024-12-07T17:46:06,436 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740 2024-12-07T17:46:06,437 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35933 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bd53b59592b3,44343,1733593565149 2024-12-07T17:46:06,437 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35933 {}] master.ServerManager(517): Registering regionserver=bd53b59592b3,44343,1733593565149 2024-12-07T17:46:06,440 DEBUG [RS:2;bd53b59592b3:38965 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf 2024-12-07T17:46:06,441 DEBUG [RS:2;bd53b59592b3:38965 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35255 2024-12-07T17:46:06,441 DEBUG [RS:2;bd53b59592b3:38965 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T17:46:06,442 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35933 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bd53b59592b3,36517,1733593565044 2024-12-07T17:46:06,442 DEBUG [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf 2024-12-07T17:46:06,442 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35933 {}] master.ServerManager(517): Registering regionserver=bd53b59592b3,36517,1733593565044 2024-12-07T17:46:06,442 DEBUG [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35255 2024-12-07T17:46:06,442 DEBUG [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T17:46:06,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T17:46:06,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T17:46:06,444 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T17:46:06,447 DEBUG [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf 2024-12-07T17:46:06,447 DEBUG [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35255 2024-12-07T17:46:06,447 DEBUG [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T17:46:06,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T17:46:06,457 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:46:06,459 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62334011, jitterRate=-0.07115085422992706}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T17:46:06,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733593566398Initializing all the Stores at 1733593566400 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593566400Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593566401 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733593566401Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593566401Cleaning up temporary data from old regions at 1733593566443 (+42 ms)Region opened successfully at 1733593566460 (+17 ms) 2024-12-07T17:46:06,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T17:46:06,461 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T17:46:06,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T17:46:06,461 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T17:46:06,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T17:46:06,463 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T17:46:06,463 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733593566461Disabling compacts and flushes for region at 1733593566461Disabling writes for close at 1733593566461Writing region close event to WAL at 1733593566462 (+1 ms)Closed at 1733593566462 2024-12-07T17:46:06,465 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T17:46:06,466 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T17:46:06,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T17:46:06,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T17:46:06,480 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T17:46:06,483 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T17:46:06,507 DEBUG [RS:2;bd53b59592b3:38965 {}] zookeeper.ZKUtil(111): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bd53b59592b3,38965,1733593565227 2024-12-07T17:46:06,508 WARN [RS:2;bd53b59592b3:38965 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T17:46:06,508 INFO [RS:2;bd53b59592b3:38965 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T17:46:06,508 DEBUG [RS:0;bd53b59592b3:36517 {}] zookeeper.ZKUtil(111): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bd53b59592b3,36517,1733593565044 2024-12-07T17:46:06,508 DEBUG [RS:1;bd53b59592b3:44343 {}] zookeeper.ZKUtil(111): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bd53b59592b3,44343,1733593565149 2024-12-07T17:46:06,508 WARN [RS:0;bd53b59592b3:36517 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T17:46:06,508 WARN [RS:1;bd53b59592b3:44343 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
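The "Opened 1588230740" journal above reports the split policy (SteppingSplitPolicy over IncreasingToUpperBoundRegionSplitPolicy) and flush policy (FlushLargeStoresPolicy, lower bound 32 MB) chosen for the meta region. A hedged sketch of pinning such policies per table via the descriptor builder; the class names are the ones printed in the log, while the setters and the flush size shown are assumptions to verify against the HBase version in use:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SplitAndFlushPolicySketch {
        public static TableDescriptor build() {
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example"))              // illustrative
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                // Class names as printed in the region open journal above.
                .setRegionSplitPolicyClassName(
                    "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy")
                .setFlushPolicyClassName(
                    "org.apache.hadoop.hbase.regionserver.FlushLargeStoresPolicy")
                .setMemStoreFlushSize(128L * 1024 * 1024)              // illustrative flush size
                .build();
        }
    }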
2024-12-07T17:46:06,508 DEBUG [RS:2;bd53b59592b3:38965 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,38965,1733593565227 2024-12-07T17:46:06,508 INFO [RS:1;bd53b59592b3:44343 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T17:46:06,508 INFO [RS:0;bd53b59592b3:36517 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T17:46:06,508 DEBUG [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,36517,1733593565044 2024-12-07T17:46:06,508 DEBUG [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,44343,1733593565149 2024-12-07T17:46:06,509 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bd53b59592b3,44343,1733593565149] 2024-12-07T17:46:06,509 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bd53b59592b3,38965,1733593565227] 2024-12-07T17:46:06,509 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bd53b59592b3,36517,1733593565044] 2024-12-07T17:46:06,531 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T17:46:06,531 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T17:46:06,531 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T17:46:06,544 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T17:46:06,544 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T17:46:06,544 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T17:46:06,548 INFO [RS:1;bd53b59592b3:44343 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T17:46:06,548 INFO [RS:2;bd53b59592b3:38965 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T17:46:06,548 INFO [RS:1;bd53b59592b3:44343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,549 INFO [RS:2;bd53b59592b3:38965 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-07T17:46:06,551 INFO [RS:0;bd53b59592b3:36517 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T17:46:06,551 INFO [RS:0;bd53b59592b3:36517 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,552 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T17:46:06,552 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T17:46:06,552 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T17:46:06,557 INFO [RS:1;bd53b59592b3:44343 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T17:46:06,558 INFO [RS:2;bd53b59592b3:38965 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T17:46:06,558 INFO [RS:0;bd53b59592b3:36517 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T17:46:06,559 INFO [RS:2;bd53b59592b3:38965 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,559 INFO [RS:0;bd53b59592b3:36517 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,559 INFO [RS:1;bd53b59592b3:44343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
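Each region server above instantiates the AsyncFSWALProvider and reports a global memstore limit of 880 M with a low-water mark of 836 M (a 0.95 fraction) and compaction throughput bounded between 50 and 100 MB/s. A hedged sketch of the corresponding configuration keys; the names follow the documented hbase.wal.provider / hbase.regionserver.global.memstore.size / hbase.hstore.compaction.throughput.* settings and should be verified against the running version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerTuningSketch {
        public static Configuration tunedConf() {
            Configuration conf = HBaseConfiguration.create();
            // WAL provider as instantiated above (AsyncFSWALProvider).
            conf.set("hbase.wal.provider", "asyncfs");
            // The global memstore limit is a fraction of the heap; the 880 M / 836 M
            // figures in the log follow from these fractions and the test heap size.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            // Compaction throughput bounds matching the 100 MB/s and 50 MB/s values logged.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            return conf;
        }
    }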
2024-12-07T17:46:06,559 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,559 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,559 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,559 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bd53b59592b3:0, corePoolSize=2, maxPoolSize=2 2024-12-07T17:46:06,560 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bd53b59592b3:0, corePoolSize=2, maxPoolSize=2 
2024-12-07T17:46:06,560 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bd53b59592b3:0, corePoolSize=2, maxPoolSize=2 2024-12-07T17:46:06,560 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,560 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,561 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,561 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,561 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bd53b59592b3:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:46:06,561 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,561 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,561 DEBUG [RS:2;bd53b59592b3:38965 {}] executor.ExecutorService(95): 
Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:46:06,561 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,561 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:06,561 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bd53b59592b3:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:46:06,561 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bd53b59592b3:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:46:06,561 DEBUG [RS:0;bd53b59592b3:36517 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:46:06,561 DEBUG [RS:1;bd53b59592b3:44343 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:46:06,566 INFO [RS:1;bd53b59592b3:44343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,566 INFO [RS:1;bd53b59592b3:44343 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,566 INFO [RS:1;bd53b59592b3:44343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:1;bd53b59592b3:44343 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:1;bd53b59592b3:44343 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:1;bd53b59592b3:44343 {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,44343,1733593565149-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:0;bd53b59592b3:36517 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:0;bd53b59592b3:36517 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:2;bd53b59592b3:38965 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:0;bd53b59592b3:36517 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:2;bd53b59592b3:38965 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:0;bd53b59592b3:36517 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-07T17:46:06,567 INFO [RS:2;bd53b59592b3:38965 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:0;bd53b59592b3:36517 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:2;bd53b59592b3:38965 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:0;bd53b59592b3:36517 {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,36517,1733593565044-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:2;bd53b59592b3:38965 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,567 INFO [RS:2;bd53b59592b3:38965 {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,38965,1733593565227-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T17:46:06,584 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T17:46:06,584 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T17:46:06,584 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T17:46:06,585 INFO [RS:0;bd53b59592b3:36517 {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,36517,1733593565044-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,585 INFO [RS:2;bd53b59592b3:38965 {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,38965,1733593565227-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,585 INFO [RS:1;bd53b59592b3:44343 {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,44343,1733593565149-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,586 INFO [RS:0;bd53b59592b3:36517 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,586 INFO [RS:2;bd53b59592b3:38965 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,586 INFO [RS:1;bd53b59592b3:44343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,586 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.Replication(171): bd53b59592b3,36517,1733593565044 started 2024-12-07T17:46:06,586 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.Replication(171): bd53b59592b3,38965,1733593565227 started 2024-12-07T17:46:06,586 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.Replication(171): bd53b59592b3,44343,1733593565149 started 2024-12-07T17:46:06,603 INFO [RS:0;bd53b59592b3:36517 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T17:46:06,604 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(1482): Serving as bd53b59592b3,36517,1733593565044, RpcServer on bd53b59592b3/172.17.0.2:36517, sessionid=0x100017040af0001 2024-12-07T17:46:06,605 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T17:46:06,605 DEBUG [RS:0;bd53b59592b3:36517 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bd53b59592b3,36517,1733593565044 2024-12-07T17:46:06,605 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bd53b59592b3,36517,1733593565044' 2024-12-07T17:46:06,605 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T17:46:06,606 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T17:46:06,607 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T17:46:06,607 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T17:46:06,607 DEBUG [RS:0;bd53b59592b3:36517 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bd53b59592b3,36517,1733593565044 2024-12-07T17:46:06,607 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bd53b59592b3,36517,1733593565044' 2024-12-07T17:46:06,607 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T17:46:06,608 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T17:46:06,608 DEBUG [RS:0;bd53b59592b3:36517 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T17:46:06,608 INFO [RS:0;bd53b59592b3:36517 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T17:46:06,608 INFO [RS:0;bd53b59592b3:36517 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T17:46:06,609 INFO [RS:1;bd53b59592b3:44343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:06,609 INFO [RS:2;bd53b59592b3:38965 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
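Each region server above registers itself as a procedure member for the flush-table-proc and online-snapshot frameworks under ZooKeeper, using the znodes named in the ZKProcedureMemberRpcs messages. A minimal, illustrative sketch for listing those znodes with the plain ZooKeeper Java client follows; the quorum address and znode paths are taken from the log, everything else (class name, timeout, connection handling) is assumed.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ListProcedureZNodes {
      public static void main(String[] args) throws Exception {
        // Quorum address as logged by ZKWatcher in this test; adjust for a real cluster.
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60863", 30000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        // Znodes named in the RegionServerProcedureManagerHost startup messages above.
        for (String path : new String[] {
            "/hbase/flush-table-proc/acquired", "/hbase/online-snapshot/acquired" }) {
          List<String> children = zk.getChildren(path, false);
          System.out.println(path + " -> " + children);
        }
        zk.close();
      }
    }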
2024-12-07T17:46:06,609 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.HRegionServer(1482): Serving as bd53b59592b3,38965,1733593565227, RpcServer on bd53b59592b3/172.17.0.2:38965, sessionid=0x100017040af0003 2024-12-07T17:46:06,609 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(1482): Serving as bd53b59592b3,44343,1733593565149, RpcServer on bd53b59592b3/172.17.0.2:44343, sessionid=0x100017040af0002 2024-12-07T17:46:06,610 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T17:46:06,610 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T17:46:06,610 DEBUG [RS:1;bd53b59592b3:44343 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bd53b59592b3,44343,1733593565149 2024-12-07T17:46:06,610 DEBUG [RS:2;bd53b59592b3:38965 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bd53b59592b3,38965,1733593565227 2024-12-07T17:46:06,610 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bd53b59592b3,44343,1733593565149' 2024-12-07T17:46:06,610 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bd53b59592b3,38965,1733593565227' 2024-12-07T17:46:06,610 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T17:46:06,610 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T17:46:06,611 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T17:46:06,611 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T17:46:06,611 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T17:46:06,611 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T17:46:06,611 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T17:46:06,611 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T17:46:06,611 DEBUG [RS:2;bd53b59592b3:38965 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bd53b59592b3,38965,1733593565227 2024-12-07T17:46:06,611 DEBUG [RS:1;bd53b59592b3:44343 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bd53b59592b3,44343,1733593565149 2024-12-07T17:46:06,611 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bd53b59592b3,38965,1733593565227' 2024-12-07T17:46:06,611 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T17:46:06,611 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bd53b59592b3,44343,1733593565149' 2024-12-07T17:46:06,611 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T17:46:06,612 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T17:46:06,612 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T17:46:06,613 DEBUG [RS:2;bd53b59592b3:38965 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T17:46:06,613 DEBUG [RS:1;bd53b59592b3:44343 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T17:46:06,613 INFO [RS:2;bd53b59592b3:38965 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T17:46:06,613 INFO [RS:2;bd53b59592b3:38965 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T17:46:06,613 INFO [RS:1;bd53b59592b3:44343 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T17:46:06,613 INFO [RS:1;bd53b59592b3:44343 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T17:46:06,635 WARN [bd53b59592b3:35933 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T17:46:06,717 INFO [RS:2;bd53b59592b3:38965 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T17:46:06,717 INFO [RS:0;bd53b59592b3:36517 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T17:46:06,717 INFO [RS:1;bd53b59592b3:44343 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T17:46:06,720 INFO [RS:1;bd53b59592b3:44343 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bd53b59592b3%2C44343%2C1733593565149, suffix=, logDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,44343,1733593565149, archiveDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/oldWALs, maxLogs=32 2024-12-07T17:46:06,720 INFO [RS:2;bd53b59592b3:38965 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bd53b59592b3%2C38965%2C1733593565227, suffix=, logDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,38965,1733593565227, archiveDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/oldWALs, maxLogs=32 2024-12-07T17:46:06,720 INFO [RS:0;bd53b59592b3:36517 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bd53b59592b3%2C36517%2C1733593565044, suffix=, logDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,36517,1733593565044, archiveDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/oldWALs, maxLogs=32 2024-12-07T17:46:06,736 DEBUG [RS:1;bd53b59592b3:44343 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,44343,1733593565149/bd53b59592b3%2C44343%2C1733593565149.1733593566723, exclude list is [], retry=0 2024-12-07T17:46:06,741 DEBUG [RS:0;bd53b59592b3:36517 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,36517,1733593565044/bd53b59592b3%2C36517%2C1733593565044.1733593566723, exclude list is [], retry=0 2024-12-07T17:46:06,741 DEBUG [RS:2;bd53b59592b3:38965 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,38965,1733593565227/bd53b59592b3%2C38965%2C1733593565227.1733593566723, exclude list is [], retry=0 2024-12-07T17:46:06,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41011,DS-deaa7d0e-da30-4026-b202-38064c06578f,DISK] 2024-12-07T17:46:06,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38991,DS-4c651403-0178-49b1-8770-385cd4b250ca,DISK] 2024-12-07T17:46:06,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38303,DS-3843ebc5-805b-4b6e-a742-a831020a81a9,DISK] 2024-12-07T17:46:06,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38303,DS-3843ebc5-805b-4b6e-a742-a831020a81a9,DISK] 2024-12-07T17:46:06,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41011,DS-deaa7d0e-da30-4026-b202-38064c06578f,DISK] 2024-12-07T17:46:06,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38991,DS-4c651403-0178-49b1-8770-385cd4b250ca,DISK] 2024-12-07T17:46:06,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41011,DS-deaa7d0e-da30-4026-b202-38064c06578f,DISK] 2024-12-07T17:46:06,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38991,DS-4c651403-0178-49b1-8770-385cd4b250ca,DISK] 2024-12-07T17:46:06,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38303,DS-3843ebc5-805b-4b6e-a742-a831020a81a9,DISK] 2024-12-07T17:46:06,774 INFO [RS:1;bd53b59592b3:44343 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,44343,1733593565149/bd53b59592b3%2C44343%2C1733593565149.1733593566723 2024-12-07T17:46:06,775 DEBUG [RS:1;bd53b59592b3:44343 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40873:40873),(127.0.0.1/127.0.0.1:43253:43253),(127.0.0.1/127.0.0.1:34253:34253)] 2024-12-07T17:46:06,775 INFO [RS:0;bd53b59592b3:36517 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,36517,1733593565044/bd53b59592b3%2C36517%2C1733593565044.1733593566723 2024-12-07T17:46:06,776 INFO [RS:2;bd53b59592b3:38965 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,38965,1733593565227/bd53b59592b3%2C38965%2C1733593565227.1733593566723 2024-12-07T17:46:06,779 DEBUG [RS:0;bd53b59592b3:36517 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43253:43253),(127.0.0.1/127.0.0.1:34253:34253),(127.0.0.1/127.0.0.1:40873:40873)] 2024-12-07T17:46:06,779 DEBUG [RS:2;bd53b59592b3:38965 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34253:34253),(127.0.0.1/127.0.0.1:40873:40873),(127.0.0.1/127.0.0.1:43253:43253)] 2024-12-07T17:46:06,889 DEBUG [bd53b59592b3:35933 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-07T17:46:06,900 DEBUG [bd53b59592b3:35933 {}] balancer.BalancerClusterState(204): Hosts are {bd53b59592b3=0} racks are {/default-rack=0} 2024-12-07T17:46:06,906 DEBUG [bd53b59592b3:35933 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T17:46:06,906 DEBUG [bd53b59592b3:35933 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T17:46:06,906 DEBUG [bd53b59592b3:35933 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T17:46:06,906 DEBUG [bd53b59592b3:35933 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T17:46:06,906 DEBUG [bd53b59592b3:35933 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T17:46:06,906 DEBUG [bd53b59592b3:35933 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T17:46:06,906 INFO [bd53b59592b3:35933 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T17:46:06,906 INFO [bd53b59592b3:35933 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T17:46:06,906 INFO [bd53b59592b3:35933 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T17:46:06,906 DEBUG [bd53b59592b3:35933 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:46:06,912 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bd53b59592b3,36517,1733593565044 2024-12-07T17:46:06,918 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bd53b59592b3,36517,1733593565044, state=OPENING 2024-12-07T17:46:06,954 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T17:46:06,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
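The WAL setup reported above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, AsyncFSWAL writer pipelines) follows from region server configuration: the roll size is the block size times the log-roll multiplier. A hedged sketch of the same numbers expressed through the standard configuration keys, assuming hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs and hbase.wal.provider are the relevant properties:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 256 MB WAL block size, as reported by AbstractFSWAL above.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll at half the block size, giving rollsize=128 MB.
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Allow at most 32 WAL files before forcing flushes, matching maxLogs=32.
        conf.setInt("hbase.regionserver.maxlogs", 32);
        // The log shows the async WAL implementation (AsyncFSWAL) in use.
        conf.set("hbase.wal.provider", "asyncfs");
        System.out.println("WAL block size: "
            + conf.getLong("hbase.regionserver.hlog.blocksize", -1));
      }
    }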
2024-12-07T17:46:06,963 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:06,965 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:06,965 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:06,965 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:06,965 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:06,967 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T17:46:06,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bd53b59592b3,36517,1733593565044}] 2024-12-07T17:46:07,145 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T17:46:07,147 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48749, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T17:46:07,158 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T17:46:07,158 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T17:46:07,159 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-07T17:46:07,162 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bd53b59592b3%2C36517%2C1733593565044.meta, suffix=.meta, logDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,36517,1733593565044, archiveDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/oldWALs, maxLogs=32 2024-12-07T17:46:07,178 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create 
output stream for /user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,36517,1733593565044/bd53b59592b3%2C36517%2C1733593565044.meta.1733593567164.meta, exclude list is [], retry=0 2024-12-07T17:46:07,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38303,DS-3843ebc5-805b-4b6e-a742-a831020a81a9,DISK] 2024-12-07T17:46:07,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41011,DS-deaa7d0e-da30-4026-b202-38064c06578f,DISK] 2024-12-07T17:46:07,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38991,DS-4c651403-0178-49b1-8770-385cd4b250ca,DISK] 2024-12-07T17:46:07,184 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,36517,1733593565044/bd53b59592b3%2C36517%2C1733593565044.meta.1733593567164.meta 2024-12-07T17:46:07,185 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34253:34253),(127.0.0.1/127.0.0.1:43253:43253),(127.0.0.1/127.0.0.1:40873:40873)] 2024-12-07T17:46:07,185 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T17:46:07,186 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T17:46:07,188 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T17:46:07,193 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-07T17:46:07,196 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T17:46:07,197 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:46:07,197 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T17:46:07,197 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T17:46:07,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T17:46:07,202 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T17:46:07,202 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:07,202 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:07,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T17:46:07,204 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T17:46:07,204 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:07,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:07,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T17:46:07,207 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T17:46:07,207 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:07,208 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:07,208 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T17:46:07,209 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T17:46:07,209 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:07,210 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
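The CompactionConfiguration lines above for the hbase:meta column families simply echo the store-level compaction settings (minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, off-peak ratio 5.0, 7-day major-compaction period with 0.5 jitter). A sketch of the configuration keys those values are usually read from, under the assumption that the standard hbase.hstore.compaction.* and hbase.hregion.majorcompaction* names apply here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // files [minFilesToCompact:3, maxFilesToCompact:10)
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // ratio 1.200000; off-peak ratio 5.000000
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // minCompactSize:128 MB (defaults to the memstore flush size)
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        // major period 604800000 ms (7 days), major jitter 0.5
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        System.out.println("compaction ratio = "
            + conf.getFloat("hbase.hstore.compaction.ratio", -1f));
      }
    }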
2024-12-07T17:46:07,210 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T17:46:07,211 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740 2024-12-07T17:46:07,214 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740 2024-12-07T17:46:07,217 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T17:46:07,217 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T17:46:07,218 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T17:46:07,220 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T17:46:07,221 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61300621, jitterRate=-0.08654956519603729}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T17:46:07,222 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T17:46:07,223 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733593567197Writing region info on filesystem at 1733593567198 (+1 ms)Initializing all the Stores at 1733593567200 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593567200Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593567200Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733593567200Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593567200Cleaning up temporary data from old regions at 1733593567217 (+17 ms)Running coprocessor post-open hooks at 1733593567222 (+5 ms)Region opened successfully at 1733593567222 2024-12-07T17:46:07,228 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733593567136 2024-12-07T17:46:07,238 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T17:46:07,238 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T17:46:07,239 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bd53b59592b3,36517,1733593565044 2024-12-07T17:46:07,242 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bd53b59592b3,36517,1733593565044, state=OPEN 2024-12-07T17:46:07,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T17:46:07,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T17:46:07,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T17:46:07,263 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T17:46:07,263 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:07,263 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:07,263 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:07,263 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:07,264 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; 
OpenRegionProcedure 1588230740, server=bd53b59592b3,36517,1733593565044 2024-12-07T17:46:07,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T17:46:07,272 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bd53b59592b3,36517,1733593565044 in 295 msec 2024-12-07T17:46:07,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T17:46:07,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 803 msec 2024-12-07T17:46:07,282 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T17:46:07,282 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T17:46:07,300 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T17:46:07,301 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bd53b59592b3,36517,1733593565044, seqNum=-1] 2024-12-07T17:46:07,317 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:46:07,320 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34041, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:46:07,358 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1040 sec 2024-12-07T17:46:07,358 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733593567358, completionTime=-1 2024-12-07T17:46:07,361 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-07T17:46:07,361 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-07T17:46:07,389 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-07T17:46:07,389 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733593627389 2024-12-07T17:46:07,389 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733593687389 2024-12-07T17:46:07,389 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 28 msec 2024-12-07T17:46:07,391 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-07T17:46:07,396 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,35933,1733593564405-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:07,397 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,35933,1733593564405-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:07,397 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,35933,1733593564405-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:07,398 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bd53b59592b3:35933, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:07,399 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:07,399 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:07,405 DEBUG [master/bd53b59592b3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T17:46:07,426 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.085sec 2024-12-07T17:46:07,427 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T17:46:07,428 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T17:46:07,429 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T17:46:07,429 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
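By this point the master has created the 'default' and 'hbase' namespaces and reports three live region servers before declaring initialization complete. A minimal client-side check of that state, sketched with the HBase Admin API (the connection setup is assumed and is not part of the test itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStateSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Mirrors "Finished waiting on RegionServer count=3" above.
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("live region servers: "
              + metrics.getLiveServerMetrics().size());
          // Mirrors the 'default' and 'hbase' namespaces created by InitMetaProcedure.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }
        }
      }
    }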
2024-12-07T17:46:07,429 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T17:46:07,430 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,35933,1733593564405-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T17:46:07,430 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,35933,1733593564405-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T17:46:07,434 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T17:46:07,435 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T17:46:07,435 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,35933,1733593564405-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:07,488 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c2b8794, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:46:07,491 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-07T17:46:07,491 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-07T17:46:07,495 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bd53b59592b3,35933,-1 for getting cluster id 2024-12-07T17:46:07,497 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T17:46:07,503 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7af12544-0327-47ed-8209-dd2204f6e024' 2024-12-07T17:46:07,505 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T17:46:07,505 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7af12544-0327-47ed-8209-dd2204f6e024" 2024-12-07T17:46:07,506 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70415419, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:46:07,506 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bd53b59592b3,35933,-1] 2024-12-07T17:46:07,508 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T17:46:07,509 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:07,510 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33644, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-07T17:46:07,513 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5260075b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:46:07,513 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T17:46:07,520 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bd53b59592b3,36517,1733593565044, seqNum=-1] 2024-12-07T17:46:07,520 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:46:07,523 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48574, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:46:07,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bd53b59592b3,35933,1733593564405 2024-12-07T17:46:07,544 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T17:46:07,548 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is bd53b59592b3,35933,1733593564405 2024-12-07T17:46:07,550 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6eb857f5 2024-12-07T17:46:07,552 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T17:46:07,554 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33646, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T17:46:07,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:46:07,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-07T17:46:07,573 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:46:07,576 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-07T17:46:07,576 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:07,579 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:46:07,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T17:46:07,589 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:07,589 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:07,592 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-941362637_22 at /127.0.0.1:54910 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:38991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54910 dst: /127.0.0.1:38991 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:07,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-07T17:46:07,602 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
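The two DFSStripedOutputStream warnings are expected here: the RS-3-2-1024k policy stripes each block group across three data and two parity blocks, so it wants five datanodes, while this minicluster runs only three, leaving parity indexes 3 and 4 unplaced. Besides the 'hdfs ec -verifyClusterSetup' command the message itself suggests, the same check can be sketched against the HDFS Java API; only the namenode address below comes from the log, the path and class name are illustrative:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Namenode address as logged for the test data directory.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:35255"), conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Hypothetical path; the test writes under /user/jenkins/test-data/...
          ErasureCodingPolicy policy =
              dfs.getErasureCodingPolicy(new Path("/user/jenkins/test-data"));
          if (policy == null) {
            System.out.println("no erasure coding policy set on this path");
          } else {
            int required = policy.getNumDataUnits() + policy.getNumParityUnits();
            int live = dfs.getDataNodeStats().length;
            // RS-3-2-1024k needs 3 data + 2 parity = 5 datanodes; this minicluster has 3.
            System.out.println(policy.getName() + " needs " + required
                + " datanodes, cluster has " + live);
          }
        }
      }
    }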
2024-12-07T17:46:07,604 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cc6581f010cf936fde20878096ab8134, NAME => 'TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf 2024-12-07T17:46:07,610 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:07,610 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:07,617 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-941362637_22 at /127.0.0.1:57652 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:38303:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57652 dst: /127.0.0.1:38303 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T17:46:07,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-07T17:46:07,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T17:46:07,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T17:46:08,026 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T17:46:08,028 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:46:08,028 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing cc6581f010cf936fde20878096ab8134, disabling compactions & flushes 2024-12-07T17:46:08,029 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 2024-12-07T17:46:08,029 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 2024-12-07T17:46:08,029 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. after waiting 0 ms 2024-12-07T17:46:08,029 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 2024-12-07T17:46:08,030 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 2024-12-07T17:46:08,030 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for cc6581f010cf936fde20878096ab8134: Waiting for close lock at 1733593568028Disabling compacts and flushes for region at 1733593568028Disabling writes for close at 1733593568029 (+1 ms)Writing region close event to WAL at 1733593568030 (+1 ms)Closed at 1733593568030 2024-12-07T17:46:08,034 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:46:08,038 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733593568034"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733593568034"}]},"ts":"1733593568034"} 2024-12-07T17:46:08,043 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
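The CreateTableProcedure above was started by a client request that, per the master log, creates 'TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single column family 'cf' with VERSIONS => '1'. A rough equivalent written against the standard HBase client API (connection setup assumed; this is not the test's own code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Single family 'cf' with VERSIONS => '1', as in the logged request.
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build())
              .build();
          admin.createTable(desc);
        }
      }
    }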
2024-12-07T17:46:08,045 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:46:08,047 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733593568045"}]},"ts":"1733593568045"} 2024-12-07T17:46:08,052 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-07T17:46:08,052 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {bd53b59592b3=0} racks are {/default-rack=0} 2024-12-07T17:46:08,053 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T17:46:08,053 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T17:46:08,054 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T17:46:08,054 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T17:46:08,054 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T17:46:08,054 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T17:46:08,054 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T17:46:08,054 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T17:46:08,054 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T17:46:08,054 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:46:08,055 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=cc6581f010cf936fde20878096ab8134, ASSIGN}] 2024-12-07T17:46:08,058 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=cc6581f010cf936fde20878096ab8134, ASSIGN 2024-12-07T17:46:08,059 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=cc6581f010cf936fde20878096ab8134, ASSIGN; state=OFFLINE, location=bd53b59592b3,44343,1733593565149; forceNewPlan=false, retain=false 2024-12-07T17:46:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T17:46:08,213 INFO [bd53b59592b3:35933 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-07T17:46:08,214 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cc6581f010cf936fde20878096ab8134, regionState=OPENING, regionLocation=bd53b59592b3,44343,1733593565149 2024-12-07T17:46:08,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=cc6581f010cf936fde20878096ab8134, ASSIGN because future has completed 2024-12-07T17:46:08,220 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cc6581f010cf936fde20878096ab8134, server=bd53b59592b3,44343,1733593565149}] 2024-12-07T17:46:08,376 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T17:46:08,381 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54725, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T17:46:08,390 INFO [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 2024-12-07T17:46:08,390 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => cc6581f010cf936fde20878096ab8134, NAME => 'TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134.', STARTKEY => '', ENDKEY => ''} 2024-12-07T17:46:08,390 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC cc6581f010cf936fde20878096ab8134 2024-12-07T17:46:08,390 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:46:08,390 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for cc6581f010cf936fde20878096ab8134 2024-12-07T17:46:08,391 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for cc6581f010cf936fde20878096ab8134 2024-12-07T17:46:08,393 INFO [StoreOpener-cc6581f010cf936fde20878096ab8134-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cc6581f010cf936fde20878096ab8134 2024-12-07T17:46:08,395 INFO [StoreOpener-cc6581f010cf936fde20878096ab8134-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc6581f010cf936fde20878096ab8134 columnFamilyName cf 2024-12-07T17:46:08,395 DEBUG [StoreOpener-cc6581f010cf936fde20878096ab8134-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:08,396 INFO [StoreOpener-cc6581f010cf936fde20878096ab8134-1 {}] regionserver.HStore(327): Store=cc6581f010cf936fde20878096ab8134/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:46:08,396 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for cc6581f010cf936fde20878096ab8134 2024-12-07T17:46:08,397 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/default/TestHBaseWalOnEC/cc6581f010cf936fde20878096ab8134 2024-12-07T17:46:08,398 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/default/TestHBaseWalOnEC/cc6581f010cf936fde20878096ab8134 2024-12-07T17:46:08,399 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for cc6581f010cf936fde20878096ab8134 2024-12-07T17:46:08,399 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for cc6581f010cf936fde20878096ab8134 2024-12-07T17:46:08,401 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for cc6581f010cf936fde20878096ab8134 2024-12-07T17:46:08,405 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/default/TestHBaseWalOnEC/cc6581f010cf936fde20878096ab8134/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:46:08,406 INFO [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened cc6581f010cf936fde20878096ab8134; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67674158, jitterRate=0.008423537015914917}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:46:08,406 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cc6581f010cf936fde20878096ab8134 2024-12-07T17:46:08,407 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for cc6581f010cf936fde20878096ab8134: Running coprocessor pre-open hook at 
1733593568391Writing region info on filesystem at 1733593568391Initializing all the Stores at 1733593568392 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733593568392Cleaning up temporary data from old regions at 1733593568399 (+7 ms)Running coprocessor post-open hooks at 1733593568406 (+7 ms)Region opened successfully at 1733593568407 (+1 ms) 2024-12-07T17:46:08,409 INFO [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134., pid=6, masterSystemTime=1733593568376 2024-12-07T17:46:08,411 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 2024-12-07T17:46:08,411 INFO [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 2024-12-07T17:46:08,412 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cc6581f010cf936fde20878096ab8134, regionState=OPEN, openSeqNum=2, regionLocation=bd53b59592b3,44343,1733593565149 2024-12-07T17:46:08,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cc6581f010cf936fde20878096ab8134, server=bd53b59592b3,44343,1733593565149 because future has completed 2024-12-07T17:46:08,421 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T17:46:08,421 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure cc6581f010cf936fde20878096ab8134, server=bd53b59592b3,44343,1733593565149 in 197 msec 2024-12-07T17:46:08,424 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T17:46:08,424 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=cc6581f010cf936fde20878096ab8134, ASSIGN in 366 msec 2024-12-07T17:46:08,426 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:46:08,426 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733593568426"}]},"ts":"1733593568426"} 2024-12-07T17:46:08,428 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-07T17:46:08,430 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 
2024-12-07T17:46:08,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 866 msec 2024-12-07T17:46:08,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T17:46:08,719 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T17:46:08,719 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-07T17:46:08,721 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:46:08,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-07T17:46:08,731 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:46:08,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-07T17:46:08,742 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134., hostname=bd53b59592b3,44343,1733593565149, seqNum=2] 2024-12-07T17:46:08,744 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:46:08,747 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38610, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:46:08,755 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-07T17:46:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-07T17:46:08,764 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-07T17:46:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T17:46:08,766 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T17:46:08,768 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T17:46:08,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T17:46:08,933 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44343 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-07T17:46:08,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 2024-12-07T17:46:08,940 INFO [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing cc6581f010cf936fde20878096ab8134 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-07T17:46:08,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/default/TestHBaseWalOnEC/cc6581f010cf936fde20878096ab8134/.tmp/cf/aeaea330a1e9406ab6ae06032152b1bf is 36, key is row/cf:cq/1733593568748/Put/seqid=0 2024-12-07T17:46:08,994 WARN [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:08,995 WARN [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:08,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1775440651_22 at /127.0.0.1:57668 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:38303:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57668 dst: /127.0.0.1:38303 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T17:46:09,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-07T17:46:09,004 WARN [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T17:46:09,004 INFO [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/default/TestHBaseWalOnEC/cc6581f010cf936fde20878096ab8134/.tmp/cf/aeaea330a1e9406ab6ae06032152b1bf 2024-12-07T17:46:09,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/default/TestHBaseWalOnEC/cc6581f010cf936fde20878096ab8134/.tmp/cf/aeaea330a1e9406ab6ae06032152b1bf as hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/default/TestHBaseWalOnEC/cc6581f010cf936fde20878096ab8134/cf/aeaea330a1e9406ab6ae06032152b1bf 2024-12-07T17:46:09,055 INFO [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/default/TestHBaseWalOnEC/cc6581f010cf936fde20878096ab8134/cf/aeaea330a1e9406ab6ae06032152b1bf, entries=1, sequenceid=5, filesize=4.7 K 2024-12-07T17:46:09,063 INFO [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for cc6581f010cf936fde20878096ab8134 in 123ms, sequenceid=5, compaction requested=false 2024-12-07T17:46:09,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-07T17:46:09,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for cc6581f010cf936fde20878096ab8134: 2024-12-07T17:46:09,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 
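The flush procedure pair above (pid=7 driving the per-region pid=8) was started by the client request logged at 17:46:08,755, and the flushed cell matches the put keyed row/cf:cq seen in the HFile writer line. A minimal client-side sketch of that sequence, assuming an ordinary Connection; the row, family, and qualifier are from the log, the value and the rest are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      // A single small cell, matching the ~32 B memstore size reported in the log.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Triggers the FlushTableProcedure / FlushRegionProcedure pair seen above.
      admin.flush(name);
    }
  }
}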
2024-12-07T17:46:09,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-07T17:46:09,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-07T17:46:09,077 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-07T17:46:09,077 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 306 msec 2024-12-07T17:46:09,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 322 msec 2024-12-07T17:46:09,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T17:46:09,088 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T17:46:09,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T17:46:09,101 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T17:46:09,101 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T17:46:09,105 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:09,106 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:09,106 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T17:46:09,106 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T17:46:09,106 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=404023594, stopped=false 2024-12-07T17:46:09,107 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bd53b59592b3,35933,1733593564405 2024-12-07T17:46:09,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:09,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:09,154 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:09,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:09,154 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:09,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, 
quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:09,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:09,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:09,154 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T17:46:09,155 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T17:46:09,155 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T17:46:09,155 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:09,156 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:09,156 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bd53b59592b3,36517,1733593565044' ***** 2024-12-07T17:46:09,156 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T17:46:09,156 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:09,156 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bd53b59592b3,44343,1733593565149' ***** 2024-12-07T17:46:09,156 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:09,156 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T17:46:09,156 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T17:46:09,156 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T17:46:09,157 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:09,157 INFO [RS:0;bd53b59592b3:36517 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T17:46:09,157 INFO [RS:0;bd53b59592b3:36517 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T17:46:09,157 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T17:46:09,157 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(959): stopping server bd53b59592b3,36517,1733593565044 2024-12-07T17:46:09,157 INFO [RS:1;bd53b59592b3:44343 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
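Everything from the "Shutting down minicluster" message onward is driven by the test's tearDown (TestHBaseWalOnEC.java:101 in the stack traces above), which closes the shared async connection and then stops the HBase and DFS mini clusters via HBaseTestingUtil. A hedged sketch of that lifecycle follows; only HBaseTestingUtil and shutdownMiniCluster() are confirmed by the stack traces, while the no-arg constructor, the startMiniCluster(3) overload, and the annotations are assumptions about how such a test is typically wired.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  // Same test utility class that appears in the stack traces above.
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Three region servers, as seen in the log; the options the real test uses
    // (EC policy on the WAL directory, datanode count) are not reproduced here.
    util.startMiniCluster(3); // assumed overload, carried over from HBaseTestingUtility
  }

  @Test
  public void writesGoThroughTheWal() throws Exception {
    // Body elided; the real test writes a row and flushes it, as logged above.
  }

  @After
  public void tearDown() throws Exception {
    // Matches HBaseTestingUtil.shutdownMiniCluster() in the tearDown stack trace:
    // closes the connection, stops HBase, then the mini DFS cluster.
    util.shutdownMiniCluster();
  }
}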
2024-12-07T17:46:09,157 INFO [RS:0;bd53b59592b3:36517 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T17:46:09,157 INFO [RS:1;bd53b59592b3:44343 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T17:46:09,157 INFO [RS:0;bd53b59592b3:36517 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bd53b59592b3:36517. 2024-12-07T17:46:09,158 DEBUG [RS:0;bd53b59592b3:36517 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T17:46:09,158 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(3091): Received CLOSE for cc6581f010cf936fde20878096ab8134 2024-12-07T17:46:09,158 DEBUG [RS:0;bd53b59592b3:36517 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:09,158 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bd53b59592b3,38965,1733593565227' ***** 2024-12-07T17:46:09,158 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T17:46:09,158 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T17:46:09,158 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T17:46:09,158 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T17:46:09,158 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T17:46:09,158 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T17:46:09,158 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T17:46:09,158 INFO [RS:2;bd53b59592b3:38965 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T17:46:09,158 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T17:46:09,158 INFO [RS:2;bd53b59592b3:38965 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-07T17:46:09,159 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.HRegionServer(959): stopping server bd53b59592b3,38965,1733593565227 2024-12-07T17:46:09,159 INFO [RS:2;bd53b59592b3:38965 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T17:46:09,159 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(959): stopping server bd53b59592b3,44343,1733593565149 2024-12-07T17:46:09,159 INFO [RS:2;bd53b59592b3:38965 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;bd53b59592b3:38965. 2024-12-07T17:46:09,159 INFO [RS:1;bd53b59592b3:44343 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T17:46:09,159 DEBUG [RS:2;bd53b59592b3:38965 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T17:46:09,159 INFO [RS:1;bd53b59592b3:44343 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;bd53b59592b3:44343. 
2024-12-07T17:46:09,159 DEBUG [RS:2;bd53b59592b3:38965 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:09,159 DEBUG [RS:1;bd53b59592b3:44343 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T17:46:09,159 DEBUG [RS:1;bd53b59592b3:44343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:09,159 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.HRegionServer(976): stopping server bd53b59592b3,38965,1733593565227; all regions closed. 2024-12-07T17:46:09,159 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T17:46:09,159 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cc6581f010cf936fde20878096ab8134, disabling compactions & flushes 2024-12-07T17:46:09,160 DEBUG [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(1325): Online Regions={cc6581f010cf936fde20878096ab8134=TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134.} 2024-12-07T17:46:09,160 INFO [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 2024-12-07T17:46:09,160 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 2024-12-07T17:46:09,160 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. after waiting 0 ms 2024-12-07T17:46:09,160 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 
2024-12-07T17:46:09,160 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T17:46:09,160 DEBUG [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-07T17:46:09,161 DEBUG [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(1351): Waiting on cc6581f010cf936fde20878096ab8134 2024-12-07T17:46:09,161 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T17:46:09,161 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T17:46:09,161 DEBUG [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T17:46:09,161 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T17:46:09,161 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T17:46:09,161 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T17:46:09,162 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-07T17:46:09,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_1073741828_1018 (size=93) 2024-12-07T17:46:09,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41011 is added to blk_1073741828_1018 (size=93) 2024-12-07T17:46:09,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741828_1018 (size=93) 2024-12-07T17:46:09,170 INFO [regionserver/bd53b59592b3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:46:09,170 INFO [regionserver/bd53b59592b3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:46:09,171 INFO [regionserver/bd53b59592b3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:46:09,175 DEBUG [RS:2;bd53b59592b3:38965 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/oldWALs 2024-12-07T17:46:09,175 INFO [RS:2;bd53b59592b3:38965 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL bd53b59592b3%2C38965%2C1733593565227:(num 1733593566723) 2024-12-07T17:46:09,175 DEBUG [RS:2;bd53b59592b3:38965 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:09,175 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:46:09,176 INFO [RS:2;bd53b59592b3:38965 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T17:46:09,176 INFO [RS:2;bd53b59592b3:38965 {}] hbase.ChoreService(370): Chore service for: regionserver/bd53b59592b3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T17:46:09,176 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T17:46:09,176 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T17:46:09,176 INFO [regionserver/bd53b59592b3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T17:46:09,176 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T17:46:09,176 INFO [RS:2;bd53b59592b3:38965 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T17:46:09,176 INFO [RS:2;bd53b59592b3:38965 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38965 2024-12-07T17:46:09,182 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/default/TestHBaseWalOnEC/cc6581f010cf936fde20878096ab8134/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T17:46:09,184 INFO [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 2024-12-07T17:46:09,184 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cc6581f010cf936fde20878096ab8134: Waiting for close lock at 1733593569159Running coprocessor pre-close hooks at 1733593569159Disabling compacts and flushes for region at 1733593569159Disabling writes for close at 1733593569160 (+1 ms)Writing region close event to WAL at 1733593569163 (+3 ms)Running coprocessor post-close hooks at 1733593569183 (+20 ms)Closed at 1733593569184 (+1 ms) 2024-12-07T17:46:09,184 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134. 
2024-12-07T17:46:09,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T17:46:09,187 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bd53b59592b3,38965,1733593565227 2024-12-07T17:46:09,187 INFO [RS:2;bd53b59592b3:38965 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T17:46:09,188 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bd53b59592b3,38965,1733593565227] 2024-12-07T17:46:09,200 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/.tmp/info/e16cda34c7014dd389a03a4c4c2df107 is 153, key is TestHBaseWalOnEC,,1733593567556.cc6581f010cf936fde20878096ab8134./info:regioninfo/1733593568412/Put/seqid=0 2024-12-07T17:46:09,203 WARN [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:09,203 WARN [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:09,204 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bd53b59592b3,38965,1733593565227 already deleted, retry=false 2024-12-07T17:46:09,204 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bd53b59592b3,38965,1733593565227 expired; onlineServers=2 2024-12-07T17:46:09,206 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-633379970_22 at /127.0.0.1:54930 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:38991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54930 dst: /127.0.0.1:38991 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:09,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-07T17:46:09,212 WARN [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T17:46:09,212 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/.tmp/info/e16cda34c7014dd389a03a4c4c2df107 2024-12-07T17:46:09,237 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/.tmp/ns/04e9f360e3f34927b71c3a25d784003a is 43, key is default/ns:d/1733593567340/Put/seqid=0 2024-12-07T17:46:09,239 WARN [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:09,239 WARN [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:09,243 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-633379970_22 at /127.0.0.1:54954 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:38991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54954 dst: /127.0.0.1:38991 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:09,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-07T17:46:09,248 WARN [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T17:46:09,248 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/.tmp/ns/04e9f360e3f34927b71c3a25d784003a 2024-12-07T17:46:09,272 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/.tmp/table/8fd44e301c0d4cb8825499e2231fcc7e is 52, key is TestHBaseWalOnEC/table:state/1733593568426/Put/seqid=0 2024-12-07T17:46:09,274 WARN [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:09,274 WARN [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:09,277 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-633379970_22 at /127.0.0.1:54980 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:38991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54980 dst: /127.0.0.1:38991 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:09,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-07T17:46:09,281 WARN [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T17:46:09,282 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/.tmp/table/8fd44e301c0d4cb8825499e2231fcc7e 2024-12-07T17:46:09,292 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/.tmp/info/e16cda34c7014dd389a03a4c4c2df107 as hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/info/e16cda34c7014dd389a03a4c4c2df107 2024-12-07T17:46:09,296 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:09,296 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38965-0x100017040af0003, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:09,296 INFO [RS:2;bd53b59592b3:38965 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T17:46:09,297 INFO [RS:2;bd53b59592b3:38965 {}] regionserver.HRegionServer(1031): Exiting; stopping=bd53b59592b3,38965,1733593565227; zookeeper connection closed. 
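The repeated "Cannot allocate parity block(index=3 / index=4, policy=RS-3-2-1024k)" warnings above are expected on this topology: RS-3-2-1024k stripes every block group across 3 data and 2 parity blocks, so it wants at least 5 datanodes, while this mini cluster starts only 3 (numDataNodes=3 later in the log), leaving exactly the two parity slots unplaceable. A minimal sketch of how one could confirm the effective erasure coding policy on the test directory with the standard Hadoop 3.x client API; the namenode port 35255 and the path are copied from this run and would differ elsewhere, and the class name is only illustrative.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class CheckEcPolicy {
      public static void main(String[] args) throws Exception {
        // Namenode address and data directory as logged in this test run.
        URI nn = URI.create("hdfs://localhost:35255");
        Path dir = new Path("/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta");

        try (FileSystem fs = FileSystem.get(nn, new Configuration())) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Effective EC policy for the path (possibly inherited); null means plain replication.
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          if (policy == null) {
            System.out.println("no EC policy set; path uses plain replication");
          } else {
            // For RS-3-2-1024k this prints 3 data + 2 parity units, i.e. 5 datanodes wanted.
            System.out.printf("policy=%s data=%d parity=%d cellSize=%d%n",
                policy.getName(), policy.getNumDataUnits(),
                policy.getNumParityUnits(), policy.getCellSize());
          }
        }
      }
    }

On the command line, the 'hdfs ec -verifyClusterSetup' command quoted in the warning itself reports the same mismatch between enabled policies and available datanodes.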
2024-12-07T17:46:09,297 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2a451b1c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2a451b1c 2024-12-07T17:46:09,301 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/info/e16cda34c7014dd389a03a4c4c2df107, entries=10, sequenceid=11, filesize=6.5 K 2024-12-07T17:46:09,303 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/.tmp/ns/04e9f360e3f34927b71c3a25d784003a as hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/ns/04e9f360e3f34927b71c3a25d784003a 2024-12-07T17:46:09,313 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/ns/04e9f360e3f34927b71c3a25d784003a, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T17:46:09,316 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/.tmp/table/8fd44e301c0d4cb8825499e2231fcc7e as hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/table/8fd44e301c0d4cb8825499e2231fcc7e 2024-12-07T17:46:09,327 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/table/8fd44e301c0d4cb8825499e2231fcc7e, entries=2, sequenceid=11, filesize=5.1 K 2024-12-07T17:46:09,329 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 167ms, sequenceid=11, compaction requested=false 2024-12-07T17:46:09,329 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T17:46:09,338 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T17:46:09,339 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T17:46:09,339 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T17:46:09,339 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 
1733593569161Running coprocessor pre-close hooks at 1733593569161Disabling compacts and flushes for region at 1733593569161Disabling writes for close at 1733593569161Obtaining lock to block concurrent updates at 1733593569162 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733593569162Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733593569163 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733593569164 (+1 ms)Flushing 1588230740/info: creating writer at 1733593569164Flushing 1588230740/info: appending metadata at 1733593569197 (+33 ms)Flushing 1588230740/info: closing flushed file at 1733593569197Flushing 1588230740/ns: creating writer at 1733593569222 (+25 ms)Flushing 1588230740/ns: appending metadata at 1733593569236 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733593569236Flushing 1588230740/table: creating writer at 1733593569258 (+22 ms)Flushing 1588230740/table: appending metadata at 1733593569271 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733593569271Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e64253e: reopening flushed file at 1733593569291 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ce73d42: reopening flushed file at 1733593569301 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4460b0f7: reopening flushed file at 1733593569314 (+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 167ms, sequenceid=11, compaction requested=false at 1733593569329 (+15 ms)Writing region close event to WAL at 1733593569331 (+2 ms)Running coprocessor post-close hooks at 1733593569339 (+8 ms)Closed at 1733593569339 2024-12-07T17:46:09,339 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T17:46:09,361 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(976): stopping server bd53b59592b3,44343,1733593565149; all regions closed. 2024-12-07T17:46:09,361 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(976): stopping server bd53b59592b3,36517,1733593565044; all regions closed. 
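The flush summary and the close journal above are internally consistent and can be checked by hand: 1377 bytes is ~1.34 KB, 3152 bytes is ~3.08 KB, and each "(+N ms)" annotation in the journal is simply the gap between consecutive step timestamps (epoch milliseconds). A small sketch of that arithmetic, with the constants copied from the entries above; nothing here is an API call, it only reproduces the reported figures.

    public class FlushJournalMath {
      public static void main(String[] args) {
        // Figures from the "Finished flush" line for region 1588230740.
        long dataSizeBytes = 1377;   // reported as ~1.34 KB
        long heapSizeBytes = 3152;   // reported as ~3.08 KB
        System.out.printf("dataSize = %.2f KB, heapSize = %.2f KB%n",
            dataSizeBytes / 1024.0, heapSizeBytes / 1024.0);

        // Two consecutive step timestamps from the close journal; the "(+N ms)"
        // suffix printed after a step is just their difference.
        long creatingWriter = 1733593569164L;  // "Flushing 1588230740/info: creating writer at ..."
        long appendingMeta  = 1733593569197L;  // "... appending metadata at ... (+33 ms)"
        System.out.println("delta = " + (appendingMeta - creatingWriter) + " ms");
      }
    }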
2024-12-07T17:46:09,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741826_1016 (size=1298) 2024-12-07T17:46:09,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741829_1019 (size=2751) 2024-12-07T17:46:09,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41011 is added to blk_1073741829_1019 (size=2751) 2024-12-07T17:46:09,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41011 is added to blk_1073741826_1016 (size=1298) 2024-12-07T17:46:09,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_1073741826_1016 (size=1298) 2024-12-07T17:46:09,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_1073741829_1019 (size=2751) 2024-12-07T17:46:09,370 DEBUG [RS:1;bd53b59592b3:44343 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/oldWALs 2024-12-07T17:46:09,370 INFO [RS:1;bd53b59592b3:44343 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL bd53b59592b3%2C44343%2C1733593565149:(num 1733593566723) 2024-12-07T17:46:09,370 DEBUG [RS:1;bd53b59592b3:44343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:09,370 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:46:09,370 DEBUG [RS:0;bd53b59592b3:36517 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/oldWALs 2024-12-07T17:46:09,370 INFO [RS:1;bd53b59592b3:44343 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T17:46:09,370 INFO [RS:0;bd53b59592b3:36517 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL bd53b59592b3%2C36517%2C1733593565044.meta:.meta(num 1733593567164) 2024-12-07T17:46:09,371 INFO [RS:1;bd53b59592b3:44343 {}] hbase.ChoreService(370): Chore service for: regionserver/bd53b59592b3:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T17:46:09,371 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T17:46:09,371 INFO [regionserver/bd53b59592b3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T17:46:09,371 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T17:46:09,371 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
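The "Moved 1 WAL file(s) to .../oldWALs" entries mean each region server's write-ahead log was archived rather than deleted on clean shutdown; the master's LogCleaner chore (cancelled further down in this log) is what eventually purges that directory. A minimal sketch for listing what ended up in the archive directory of this run; the namenode port and the oldWALs path are copied from the entries above, and the class name is only illustrative.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListOldWals {
      public static void main(String[] args) throws Exception {
        URI nn = URI.create("hdfs://localhost:35255");
        Path oldWals = new Path("/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/oldWALs");

        try (FileSystem fs = FileSystem.get(nn, new Configuration())) {
          // Print the archived WAL file names and sizes.
          for (FileStatus status : fs.listStatus(oldWals)) {
            System.out.println(status.getPath().getName() + " " + status.getLen() + " bytes");
          }
        }
      }
    }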
2024-12-07T17:46:09,371 INFO [RS:1;bd53b59592b3:44343 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T17:46:09,371 INFO [RS:1;bd53b59592b3:44343 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44343 2024-12-07T17:46:09,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41011 is added to blk_1073741827_1017 (size=93) 2024-12-07T17:46:09,374 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/WALs/bd53b59592b3,36517,1733593565044/bd53b59592b3%2C36517%2C1733593565044.1733593566723 not finished, retry = 0 2024-12-07T17:46:09,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_1073741827_1017 (size=93) 2024-12-07T17:46:09,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741827_1017 (size=93) 2024-12-07T17:46:09,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-07T17:46:09,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41011 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-07T17:46:09,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41011 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-07T17:46:09,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-07T17:46:09,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-07T17:46:09,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-07T17:46:09,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-07T17:46:09,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-07T17:46:09,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bd53b59592b3,44343,1733593565149 2024-12-07T17:46:09,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T17:46:09,396 INFO [RS:1;bd53b59592b3:44343 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T17:46:09,397 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bd53b59592b3,44343,1733593565149] 2024-12-07T17:46:09,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-07T17:46:09,399 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-07T17:46:09,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41011 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-07T17:46:09,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-07T17:46:09,412 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bd53b59592b3,44343,1733593565149 already deleted, retry=false 2024-12-07T17:46:09,412 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bd53b59592b3,44343,1733593565149 expired; onlineServers=1 2024-12-07T17:46:09,482 DEBUG [RS:0;bd53b59592b3:36517 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/oldWALs 2024-12-07T17:46:09,482 INFO [RS:0;bd53b59592b3:36517 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL bd53b59592b3%2C36517%2C1733593565044:(num 1733593566723) 2024-12-07T17:46:09,483 DEBUG [RS:0;bd53b59592b3:36517 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:09,483 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:46:09,483 INFO [RS:0;bd53b59592b3:36517 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T17:46:09,483 INFO [RS:0;bd53b59592b3:36517 {}] hbase.ChoreService(370): Chore service for: regionserver/bd53b59592b3:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T17:46:09,484 INFO [RS:0;bd53b59592b3:36517 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T17:46:09,484 INFO [regionserver/bd53b59592b3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T17:46:09,484 INFO [RS:0;bd53b59592b3:36517 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36517 2024-12-07T17:46:09,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T17:46:09,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bd53b59592b3,36517,1733593565044 2024-12-07T17:46:09,496 INFO [RS:0;bd53b59592b3:36517 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T17:46:09,504 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bd53b59592b3,36517,1733593565044] 2024-12-07T17:46:09,504 INFO [RS:1;bd53b59592b3:44343 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T17:46:09,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:09,504 INFO [RS:1;bd53b59592b3:44343 {}] regionserver.HRegionServer(1031): Exiting; stopping=bd53b59592b3,44343,1733593565149; zookeeper connection closed. 2024-12-07T17:46:09,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44343-0x100017040af0002, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:09,504 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@b639253 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@b639253 2024-12-07T17:46:09,512 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bd53b59592b3,36517,1733593565044 already deleted, retry=false 2024-12-07T17:46:09,512 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bd53b59592b3,36517,1733593565044 expired; onlineServers=0 2024-12-07T17:46:09,512 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bd53b59592b3,35933,1733593564405' ***** 2024-12-07T17:46:09,512 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T17:46:09,513 INFO [M:0;bd53b59592b3:35933 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T17:46:09,513 INFO [M:0;bd53b59592b3:35933 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T17:46:09,513 DEBUG [M:0;bd53b59592b3:35933 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T17:46:09,513 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
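The shutdown ordering above is driven by ZooKeeper ephemeral nodes: each region server registers itself under /hbase/rs, the master's RegionServerTracker watches that znode, and when a server closes its ZooKeeper session the NodeDeleted/NodeChildrenChanged events seen here trigger "processing expiration" until onlineServers reaches 0 and the master stops itself. A minimal sketch of inspecting that znode with the plain ZooKeeper client; the quorum address 127.0.0.1:60863 and the base znode /hbase are the ones logged by ZKWatcher above, the session timeout and class name are arbitrary.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServerZnodes {
      public static void main(String[] args) throws Exception {
        // Quorum string as logged by ZKWatcher in this run; 30s session timeout, no-op watcher.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60863", 30_000, event -> { });
        try {
          // One ephemeral child per live region server, e.g. bd53b59592b3,38965,1733593565227;
          // the list shrinks as servers shut down and their sessions close.
          List<String> servers = zk.getChildren("/hbase/rs", false);
          servers.forEach(System.out::println);
        } finally {
          zk.close();
        }
      }
    }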
2024-12-07T17:46:09,513 DEBUG [M:0;bd53b59592b3:35933 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T17:46:09,513 DEBUG [master/bd53b59592b3:0:becomeActiveMaster-HFileCleaner.large.0-1733593566341 {}] cleaner.HFileCleaner(306): Exit Thread[master/bd53b59592b3:0:becomeActiveMaster-HFileCleaner.large.0-1733593566341,5,FailOnTimeoutGroup] 2024-12-07T17:46:09,513 DEBUG [master/bd53b59592b3:0:becomeActiveMaster-HFileCleaner.small.0-1733593566344 {}] cleaner.HFileCleaner(306): Exit Thread[master/bd53b59592b3:0:becomeActiveMaster-HFileCleaner.small.0-1733593566344,5,FailOnTimeoutGroup] 2024-12-07T17:46:09,514 INFO [M:0;bd53b59592b3:35933 {}] hbase.ChoreService(370): Chore service for: master/bd53b59592b3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T17:46:09,514 INFO [M:0;bd53b59592b3:35933 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T17:46:09,514 DEBUG [M:0;bd53b59592b3:35933 {}] master.HMaster(1795): Stopping service threads 2024-12-07T17:46:09,514 INFO [M:0;bd53b59592b3:35933 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T17:46:09,514 INFO [M:0;bd53b59592b3:35933 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T17:46:09,515 INFO [M:0;bd53b59592b3:35933 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T17:46:09,515 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T17:46:09,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T17:46:09,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:09,521 DEBUG [M:0;bd53b59592b3:35933 {}] zookeeper.ZKUtil(347): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T17:46:09,521 WARN [M:0;bd53b59592b3:35933 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T17:46:09,522 INFO [M:0;bd53b59592b3:35933 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/.lastflushedseqids 2024-12-07T17:46:09,530 WARN [M:0;bd53b59592b3:35933 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:09,530 WARN [M:0;bd53b59592b3:35933 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-07T17:46:09,533 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-941362637_22 at /127.0.0.1:57750 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:38303:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57750 dst: /127.0.0.1:38303 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:09,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-07T17:46:09,537 WARN [M:0;bd53b59592b3:35933 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T17:46:09,537 INFO [M:0;bd53b59592b3:35933 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T17:46:09,537 INFO [M:0;bd53b59592b3:35933 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T17:46:09,537 DEBUG [M:0;bd53b59592b3:35933 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T17:46:09,537 INFO [M:0;bd53b59592b3:35933 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:09,537 DEBUG [M:0;bd53b59592b3:35933 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:09,537 DEBUG [M:0;bd53b59592b3:35933 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T17:46:09,537 DEBUG [M:0;bd53b59592b3:35933 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T17:46:09,538 INFO [M:0;bd53b59592b3:35933 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-07T17:46:09,559 DEBUG [M:0;bd53b59592b3:35933 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9a9be1393df94750ac8228d7cf9845a4 is 82, key is hbase:meta,,1/info:regioninfo/1733593567239/Put/seqid=0 2024-12-07T17:46:09,561 WARN [M:0;bd53b59592b3:35933 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:09,561 WARN [M:0;bd53b59592b3:35933 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:09,564 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-941362637_22 at /127.0.0.1:55044 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:38991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55044 dst: /127.0.0.1:38991 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:09,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-07T17:46:09,568 WARN [M:0;bd53b59592b3:35933 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T17:46:09,568 INFO [M:0;bd53b59592b3:35933 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9a9be1393df94750ac8228d7cf9845a4 2024-12-07T17:46:09,592 DEBUG [M:0;bd53b59592b3:35933 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5a322eaba8654d7289330dfc30c35530 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733593568432/Put/seqid=0 2024-12-07T17:46:09,594 WARN [M:0;bd53b59592b3:35933 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:09,594 WARN [M:0;bd53b59592b3:35933 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:09,597 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-941362637_22 at /127.0.0.1:55054 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:38991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55054 dst: /127.0.0.1:38991 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:09,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_-9223372036854775552_1037 (size=6438) 2024-12-07T17:46:09,601 WARN [M:0;bd53b59592b3:35933 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
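The proc-family cell key above, "\x00\x00\x00\x00\x00\x00\x00\x04/proc:d/...", is a binary-escaped 8-byte row that reads like a big-endian long, i.e. presumably procedure id 4 in the master's local store; that interpretation is an inference from the 'master:store' proc column family, not something the log states. A quick way to reproduce the escaping with HBase's own Bytes utility:

    import org.apache.hadoop.hbase.util.Bytes;

    public class ProcRowKey {
      public static void main(String[] args) {
        // An 8-byte big-endian encoding of the long value 4 ...
        byte[] row = Bytes.toBytes(4L);
        // ... rendered with the same \xNN escaping seen in the log line above.
        System.out.println(Bytes.toStringBinary(row));  // \x00\x00\x00\x00\x00\x00\x00\x04
        // And decoded back, confirming the row holds the value 4.
        System.out.println(Bytes.toLong(row));
      }
    }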
2024-12-07T17:46:09,602 INFO [M:0;bd53b59592b3:35933 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5a322eaba8654d7289330dfc30c35530 2024-12-07T17:46:09,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:09,604 INFO [RS:0;bd53b59592b3:36517 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T17:46:09,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36517-0x100017040af0001, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:09,604 INFO [RS:0;bd53b59592b3:36517 {}] regionserver.HRegionServer(1031): Exiting; stopping=bd53b59592b3,36517,1733593565044; zookeeper connection closed. 2024-12-07T17:46:09,604 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@562e6158 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@562e6158 2024-12-07T17:46:09,605 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-07T17:46:09,625 DEBUG [M:0;bd53b59592b3:35933 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8e20b2f60121480ebf79a830d2bdf643 is 69, key is bd53b59592b3,36517,1733593565044/rs:state/1733593566443/Put/seqid=0 2024-12-07T17:46:09,627 WARN [M:0;bd53b59592b3:35933 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:09,627 WARN [M:0;bd53b59592b3:35933 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T17:46:09,629 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-941362637_22 at /127.0.0.1:57770 [Receiving block BP-1730434801-172.17.0.2-1733593560554:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:38303:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57770 dst: /127.0.0.1:38303 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:46:09,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-07T17:46:09,634 WARN [M:0;bd53b59592b3:35933 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T17:46:09,634 INFO [M:0;bd53b59592b3:35933 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8e20b2f60121480ebf79a830d2bdf643 2024-12-07T17:46:09,642 DEBUG [M:0;bd53b59592b3:35933 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9a9be1393df94750ac8228d7cf9845a4 as hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9a9be1393df94750ac8228d7cf9845a4 2024-12-07T17:46:09,649 INFO [M:0;bd53b59592b3:35933 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9a9be1393df94750ac8228d7cf9845a4, entries=8, sequenceid=72, filesize=5.5 K 2024-12-07T17:46:09,650 DEBUG [M:0;bd53b59592b3:35933 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5a322eaba8654d7289330dfc30c35530 as hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5a322eaba8654d7289330dfc30c35530 2024-12-07T17:46:09,658 INFO [M:0;bd53b59592b3:35933 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5a322eaba8654d7289330dfc30c35530, entries=8, sequenceid=72, filesize=6.3 K 2024-12-07T17:46:09,660 DEBUG [M:0;bd53b59592b3:35933 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8e20b2f60121480ebf79a830d2bdf643 as hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8e20b2f60121480ebf79a830d2bdf643 2024-12-07T17:46:09,668 INFO [M:0;bd53b59592b3:35933 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8e20b2f60121480ebf79a830d2bdf643, entries=3, sequenceid=72, filesize=5.2 K 2024-12-07T17:46:09,669 INFO [M:0;bd53b59592b3:35933 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=72, compaction requested=false 2024-12-07T17:46:09,670 INFO [M:0;bd53b59592b3:35933 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:09,670 DEBUG [M:0;bd53b59592b3:35933 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733593569537Disabling compacts and flushes for region at 1733593569537Disabling writes for close at 1733593569537Obtaining lock to block concurrent updates at 1733593569538 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733593569538Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733593569538Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733593569539 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733593569539Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733593569558 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733593569558Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733593569575 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733593569592 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733593569592Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733593569609 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733593569624 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733593569624Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45d4f426: reopening flushed file at 1733593569641 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4956bc85: reopening flushed file at 1733593569649 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25bdc7e2: reopening flushed file at 1733593569659 (+10 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=72, compaction requested=false at 1733593569669 (+10 ms)Writing region close event to WAL at 1733593569670 (+1 ms)Closed at 1733593569670 2024-12-07T17:46:09,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41011 is added to blk_1073741825_1011 (size=32662) 2024-12-07T17:46:09,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38991 is added to blk_1073741825_1011 (size=32662) 2024-12-07T17:46:09,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741825_1011 (size=32662) 2024-12-07T17:46:09,674 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T17:46:09,674 INFO [M:0;bd53b59592b3:35933 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T17:46:09,674 INFO [M:0;bd53b59592b3:35933 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35933 2024-12-07T17:46:09,675 INFO [M:0;bd53b59592b3:35933 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T17:46:09,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:09,805 INFO [M:0;bd53b59592b3:35933 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T17:46:09,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35933-0x100017040af0000, quorum=127.0.0.1:60863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:09,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:46:09,862 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T17:46:09,862 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T17:46:09,863 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T17:46:09,863 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/hadoop.log.dir/,STOPPED} 2024-12-07T17:46:09,867 WARN [BP-1730434801-172.17.0.2-1733593560554 heartbeating to localhost/127.0.0.1:35255 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T17:46:09,867 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T17:46:09,867 WARN [BP-1730434801-172.17.0.2-1733593560554 heartbeating to localhost/127.0.0.1:35255 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1730434801-172.17.0.2-1733593560554 (Datanode Uuid 115d20bd-56af-4935-b69b-458a196ec151) service to localhost/127.0.0.1:35255 2024-12-07T17:46:09,867 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T17:46:09,869 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/data/data5/current/BP-1730434801-172.17.0.2-1733593560554 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T17:46:09,869 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/data/data6/current/BP-1730434801-172.17.0.2-1733593560554 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T17:46:09,869 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T17:46:09,873 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:46:09,873 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T17:46:09,873 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T17:46:09,873 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T17:46:09,873 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/hadoop.log.dir/,STOPPED} 2024-12-07T17:46:09,875 WARN [BP-1730434801-172.17.0.2-1733593560554 heartbeating to localhost/127.0.0.1:35255 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T17:46:09,875 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T17:46:09,875 WARN [BP-1730434801-172.17.0.2-1733593560554 heartbeating to localhost/127.0.0.1:35255 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1730434801-172.17.0.2-1733593560554 (Datanode Uuid b159a9a8-a08c-46b8-a779-5f00ed87531a) service to localhost/127.0.0.1:35255 2024-12-07T17:46:09,875 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T17:46:09,875 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/data/data3/current/BP-1730434801-172.17.0.2-1733593560554 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T17:46:09,875 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/data/data4/current/BP-1730434801-172.17.0.2-1733593560554 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T17:46:09,875 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T17:46:09,878 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:46:09,878 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T17:46:09,878 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T17:46:09,878 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T17:46:09,878 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/hadoop.log.dir/,STOPPED} 2024-12-07T17:46:09,880 WARN [BP-1730434801-172.17.0.2-1733593560554 heartbeating to localhost/127.0.0.1:35255 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T17:46:09,880 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T17:46:09,880 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T17:46:09,880 WARN [BP-1730434801-172.17.0.2-1733593560554 heartbeating to localhost/127.0.0.1:35255 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1730434801-172.17.0.2-1733593560554 (Datanode Uuid 542f0800-17a5-447d-b3c2-acd6b34d20d7) service to localhost/127.0.0.1:35255 2024-12-07T17:46:09,880 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/data/data1/current/BP-1730434801-172.17.0.2-1733593560554 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T17:46:09,880 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/cluster_5cbc8343-3cb0-70c7-7ccf-ef2708808c54/data/data2/current/BP-1730434801-172.17.0.2-1733593560554 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T17:46:09,881 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T17:46:09,888 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T17:46:09,888 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T17:46:09,888 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T17:46:09,889 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T17:46:09,889 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/hadoop.log.dir/,STOPPED} 2024-12-07T17:46:09,896 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T17:46:09,922 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T17:46:09,928 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=93 (was 161), OpenFileDescriptor=439 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=164 (was 158) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=20104 (was 20419) 2024-12-07T17:46:09,934 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=93, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=164, ProcessCount=11, AvailableMemoryMB=20104 2024-12-07T17:46:09,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T17:46:09,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/hadoop.log.dir so I do NOT create it in target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c 2024-12-07T17:46:09,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ab1ebba-00a0-b029-e9e0-42b71c7e5d49/hadoop.tmp.dir so I do NOT create it in target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c 2024-12-07T17:46:09,934 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060, deleteOnExit=true 2024-12-07T17:46:09,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T17:46:09,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/test.cache.data in system properties and HBase conf 2024-12-07T17:46:09,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T17:46:09,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/hadoop.log.dir in system properties and HBase conf 2024-12-07T17:46:09,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T17:46:09,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T17:46:09,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T17:46:09,935 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T17:46:09,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T17:46:09,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T17:46:09,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T17:46:09,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T17:46:09,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T17:46:09,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T17:46:09,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T17:46:09,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T17:46:09,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T17:46:09,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/nfs.dump.dir in system properties and HBase conf 2024-12-07T17:46:09,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/java.io.tmpdir in system properties and HBase conf 2024-12-07T17:46:09,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T17:46:09,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T17:46:09,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T17:46:10,192 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:46:10,196 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:46:10,197 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:46:10,197 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:46:10,197 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T17:46:10,199 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:46:10,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62802e4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:46:10,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@686c9dd5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T17:46:10,289 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@de17eef{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/java.io.tmpdir/jetty-localhost-35891-hadoop-hdfs-3_4_1-tests_jar-_-any-9682437062829989819/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T17:46:10,289 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d329a96{HTTP/1.1, (http/1.1)}{localhost:35891} 2024-12-07T17:46:10,289 INFO [Time-limited test {}] server.Server(415): Started @11361ms 2024-12-07T17:46:10,503 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:46:10,506 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:46:10,507 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:46:10,507 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:46:10,507 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T17:46:10,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20a0e688{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:46:10,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d6118e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T17:46:10,597 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14e9278c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/java.io.tmpdir/jetty-localhost-40697-hadoop-hdfs-3_4_1-tests_jar-_-any-10614805985608530211/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:46:10,597 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@18099ade{HTTP/1.1, (http/1.1)}{localhost:40697} 2024-12-07T17:46:10,597 INFO [Time-limited test {}] server.Server(415): Started @11669ms 2024-12-07T17:46:10,598 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T17:46:10,630 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:46:10,633 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:46:10,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:46:10,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:46:10,633 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T17:46:10,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b0441b5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:46:10,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1768a8c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T17:46:10,725 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77b0dcb8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/java.io.tmpdir/jetty-localhost-42695-hadoop-hdfs-3_4_1-tests_jar-_-any-16168060738057405204/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:46:10,726 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@63c7dc3a{HTTP/1.1, (http/1.1)}{localhost:42695} 2024-12-07T17:46:10,726 INFO [Time-limited test {}] server.Server(415): Started @11797ms 2024-12-07T17:46:10,727 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T17:46:10,753 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:46:10,756 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:46:10,757 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:46:10,757 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:46:10,757 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T17:46:10,757 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@438bc7ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:46:10,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19dff04d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T17:46:10,847 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@42d862a7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/java.io.tmpdir/jetty-localhost-34665-hadoop-hdfs-3_4_1-tests_jar-_-any-9308004875416649383/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:46:10,847 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e8938f2{HTTP/1.1, (http/1.1)}{localhost:34665} 2024-12-07T17:46:10,848 INFO [Time-limited test {}] server.Server(415): Started @11919ms 2024-12-07T17:46:10,849 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T17:46:11,432 WARN [Thread-568 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/data/data1/current/BP-1349556790-172.17.0.2-1733593569958/current, will proceed with Du for space computation calculation, 2024-12-07T17:46:11,433 WARN [Thread-569 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/data/data2/current/BP-1349556790-172.17.0.2-1733593569958/current, will proceed with Du for space computation calculation, 2024-12-07T17:46:11,452 WARN [Thread-509 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T17:46:11,454 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb92a324cba22600f with lease ID 0x782919656f0eed72: Processing first storage report for DS-97917c34-4080-430d-9e05-477249b16b8f from datanode DatanodeRegistration(127.0.0.1:34327, datanodeUuid=920c9691-b090-467c-8575-67b717b45c6a, infoPort=44005, infoSecurePort=0, ipcPort=42991, storageInfo=lv=-57;cid=testClusterID;nsid=205091837;c=1733593569958) 2024-12-07T17:46:11,454 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb92a324cba22600f with lease ID 0x782919656f0eed72: from storage DS-97917c34-4080-430d-9e05-477249b16b8f node DatanodeRegistration(127.0.0.1:34327, datanodeUuid=920c9691-b090-467c-8575-67b717b45c6a, infoPort=44005, infoSecurePort=0, ipcPort=42991, storageInfo=lv=-57;cid=testClusterID;nsid=205091837;c=1733593569958), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T17:46:11,455 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb92a324cba22600f with lease ID 0x782919656f0eed72: Processing first storage report for DS-f1289364-603c-4064-a82d-c394b0da384e from datanode DatanodeRegistration(127.0.0.1:34327, datanodeUuid=920c9691-b090-467c-8575-67b717b45c6a, infoPort=44005, infoSecurePort=0, ipcPort=42991, storageInfo=lv=-57;cid=testClusterID;nsid=205091837;c=1733593569958) 2024-12-07T17:46:11,455 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb92a324cba22600f with lease ID 0x782919656f0eed72: from storage DS-f1289364-603c-4064-a82d-c394b0da384e node DatanodeRegistration(127.0.0.1:34327, datanodeUuid=920c9691-b090-467c-8575-67b717b45c6a, infoPort=44005, infoSecurePort=0, ipcPort=42991, storageInfo=lv=-57;cid=testClusterID;nsid=205091837;c=1733593569958), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T17:46:11,694 WARN [Thread-580 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/data/data3/current/BP-1349556790-172.17.0.2-1733593569958/current, will proceed with Du for space computation calculation, 2024-12-07T17:46:11,695 WARN [Thread-581 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/data/data4/current/BP-1349556790-172.17.0.2-1733593569958/current, will proceed with Du for space computation calculation, 2024-12-07T17:46:11,711 WARN [Thread-532 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T17:46:11,714 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xda924ba19e67957a with lease ID 0x782919656f0eed73: Processing first storage report for DS-b1e21c30-2c10-4b5f-9c9a-efe3643d433f from datanode DatanodeRegistration(127.0.0.1:41297, datanodeUuid=296d6558-7640-42fa-9a2e-486ad4a715b1, infoPort=40177, infoSecurePort=0, ipcPort=46391, storageInfo=lv=-57;cid=testClusterID;nsid=205091837;c=1733593569958) 2024-12-07T17:46:11,714 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xda924ba19e67957a with lease ID 0x782919656f0eed73: from storage DS-b1e21c30-2c10-4b5f-9c9a-efe3643d433f node DatanodeRegistration(127.0.0.1:41297, datanodeUuid=296d6558-7640-42fa-9a2e-486ad4a715b1, infoPort=40177, infoSecurePort=0, ipcPort=46391, storageInfo=lv=-57;cid=testClusterID;nsid=205091837;c=1733593569958), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T17:46:11,714 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xda924ba19e67957a with lease ID 0x782919656f0eed73: Processing first storage report for DS-dfd6bdc7-d8cf-4d08-86df-e3ffc36c86d9 from datanode DatanodeRegistration(127.0.0.1:41297, datanodeUuid=296d6558-7640-42fa-9a2e-486ad4a715b1, infoPort=40177, infoSecurePort=0, ipcPort=46391, storageInfo=lv=-57;cid=testClusterID;nsid=205091837;c=1733593569958) 2024-12-07T17:46:11,714 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xda924ba19e67957a with lease ID 0x782919656f0eed73: from storage DS-dfd6bdc7-d8cf-4d08-86df-e3ffc36c86d9 node DatanodeRegistration(127.0.0.1:41297, datanodeUuid=296d6558-7640-42fa-9a2e-486ad4a715b1, infoPort=40177, infoSecurePort=0, ipcPort=46391, storageInfo=lv=-57;cid=testClusterID;nsid=205091837;c=1733593569958), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T17:46:11,803 WARN [Thread-591 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/data/data5/current/BP-1349556790-172.17.0.2-1733593569958/current, will proceed with Du for space computation calculation, 2024-12-07T17:46:11,803 WARN [Thread-592 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/data/data6/current/BP-1349556790-172.17.0.2-1733593569958/current, will proceed with Du for space computation calculation, 2024-12-07T17:46:11,825 WARN [Thread-554 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T17:46:11,827 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe81256cb713bf231 with lease ID 0x782919656f0eed74: Processing first storage report for DS-ce26c41b-c250-4174-aae2-28bb08d7927a from datanode DatanodeRegistration(127.0.0.1:34699, datanodeUuid=769d476f-880f-4ec7-a0d9-6c6db3372f84, infoPort=34183, infoSecurePort=0, ipcPort=36997, storageInfo=lv=-57;cid=testClusterID;nsid=205091837;c=1733593569958) 2024-12-07T17:46:11,827 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe81256cb713bf231 with lease ID 0x782919656f0eed74: from storage DS-ce26c41b-c250-4174-aae2-28bb08d7927a node DatanodeRegistration(127.0.0.1:34699, datanodeUuid=769d476f-880f-4ec7-a0d9-6c6db3372f84, infoPort=34183, infoSecurePort=0, ipcPort=36997, storageInfo=lv=-57;cid=testClusterID;nsid=205091837;c=1733593569958), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T17:46:11,827 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe81256cb713bf231 with lease ID 0x782919656f0eed74: Processing first storage report for DS-6b547180-fddb-49ff-856d-950cf7c755ad from datanode DatanodeRegistration(127.0.0.1:34699, datanodeUuid=769d476f-880f-4ec7-a0d9-6c6db3372f84, infoPort=34183, infoSecurePort=0, ipcPort=36997, storageInfo=lv=-57;cid=testClusterID;nsid=205091837;c=1733593569958) 2024-12-07T17:46:11,827 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe81256cb713bf231 with lease ID 0x782919656f0eed74: from storage DS-6b547180-fddb-49ff-856d-950cf7c755ad node DatanodeRegistration(127.0.0.1:34699, datanodeUuid=769d476f-880f-4ec7-a0d9-6c6db3372f84, infoPort=34183, infoSecurePort=0, ipcPort=36997, storageInfo=lv=-57;cid=testClusterID;nsid=205091837;c=1733593569958), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T17:46:11,895 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c 2024-12-07T17:46:11,899 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/zookeeper_0, clientPort=49709, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T17:46:11,900 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49709 2024-12-07T17:46:11,901 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:11,903 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:11,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741825_1001 (size=7) 2024-12-07T17:46:11,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741825_1001 (size=7) 2024-12-07T17:46:11,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741825_1001 (size=7) 2024-12-07T17:46:11,918 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab with version=8 2024-12-07T17:46:11,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35255/user/jenkins/test-data/54aa0d7e-ded0-c68b-f87e-78244f096baf/hbase-staging 2024-12-07T17:46:11,919 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bd53b59592b3:0 server-side Connection retries=45 2024-12-07T17:46:11,920 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:11,920 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:11,920 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T17:46:11,920 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:11,920 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T17:46:11,920 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T17:46:11,920 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T17:46:11,921 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38409 2024-12-07T17:46:11,922 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38409 connecting to ZooKeeper ensemble=127.0.0.1:49709 2024-12-07T17:46:11,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:384090x0, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T17:46:11,973 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38409-0x100017060d60000 connected 2024-12-07T17:46:12,063 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:12,068 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:12,072 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:12,073 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab, hbase.cluster.distributed=false 2024-12-07T17:46:12,075 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T17:46:12,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38409 2024-12-07T17:46:12,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38409 2024-12-07T17:46:12,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38409 2024-12-07T17:46:12,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38409 2024-12-07T17:46:12,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38409 2024-12-07T17:46:12,089 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bd53b59592b3:0 server-side Connection retries=45 2024-12-07T17:46:12,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:12,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:12,089 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T17:46:12,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:12,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T17:46:12,089 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T17:46:12,090 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T17:46:12,090 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33337 2024-12-07T17:46:12,091 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33337 connecting to ZooKeeper ensemble=127.0.0.1:49709 2024-12-07T17:46:12,092 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:12,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:12,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:333370x0, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T17:46:12,104 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33337-0x100017060d60001 connected 2024-12-07T17:46:12,104 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:12,104 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T17:46:12,105 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T17:46:12,105 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T17:46:12,106 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T17:46:12,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33337 2024-12-07T17:46:12,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33337 2024-12-07T17:46:12,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33337 2024-12-07T17:46:12,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33337 2024-12-07T17:46:12,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33337 2024-12-07T17:46:12,122 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bd53b59592b3:0 server-side Connection retries=45 2024-12-07T17:46:12,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:12,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:12,122 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T17:46:12,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:12,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T17:46:12,122 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T17:46:12,122 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T17:46:12,123 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41571 2024-12-07T17:46:12,124 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41571 connecting to ZooKeeper ensemble=127.0.0.1:49709 2024-12-07T17:46:12,124 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:12,126 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:12,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:415710x0, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T17:46:12,137 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:12,137 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41571-0x100017060d60002 connected 2024-12-07T17:46:12,138 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T17:46:12,138 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T17:46:12,139 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T17:46:12,140 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T17:46:12,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41571 2024-12-07T17:46:12,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41571 2024-12-07T17:46:12,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41571 2024-12-07T17:46:12,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41571 2024-12-07T17:46:12,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41571 2024-12-07T17:46:12,157 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bd53b59592b3:0 server-side Connection retries=45 2024-12-07T17:46:12,157 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:12,157 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:12,157 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T17:46:12,157 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:46:12,157 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T17:46:12,157 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T17:46:12,157 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T17:46:12,158 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35059 2024-12-07T17:46:12,159 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35059 connecting to ZooKeeper ensemble=127.0.0.1:49709 2024-12-07T17:46:12,159 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:12,160 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:12,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350590x0, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T17:46:12,171 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:350590x0, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:12,171 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35059-0x100017060d60003 connected 2024-12-07T17:46:12,171 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T17:46:12,171 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T17:46:12,172 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T17:46:12,173 DEBUG [Time-limited 
test {}] zookeeper.ZKUtil(113): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T17:46:12,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35059 2024-12-07T17:46:12,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35059 2024-12-07T17:46:12,175 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35059 2024-12-07T17:46:12,175 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35059 2024-12-07T17:46:12,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35059 2024-12-07T17:46:12,189 DEBUG [M:0;bd53b59592b3:38409 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bd53b59592b3:38409 2024-12-07T17:46:12,189 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bd53b59592b3,38409,1733593571919 2024-12-07T17:46:12,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:12,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:12,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:12,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:12,196 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bd53b59592b3,38409,1733593571919 2024-12-07T17:46:12,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T17:46:12,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T17:46:12,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T17:46:12,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,204 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T17:46:12,205 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bd53b59592b3,38409,1733593571919 from backup master directory 2024-12-07T17:46:12,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bd53b59592b3,38409,1733593571919 2024-12-07T17:46:12,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:12,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:12,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:12,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:46:12,212 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
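The repeated "Set watcher on znode that does not yet exist, /hbase/master" lines reflect a standard ZooKeeper exists-watch used for master election. A minimal sketch of that pattern with the plain ZooKeeper client API rather than HBase's internal ZKWatcher/ZKUtil; the ensemble address is taken from the log, everything else is illustrative:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch masterCreated = new CountDownLatch(1);
        // Ensemble address taken from the log; the session timeout is arbitrary here.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49709", 30_000, event -> {
          if (event.getType() == Watcher.Event.EventType.NodeCreated
              && "/hbase/master".equals(event.getPath())) {
            masterCreated.countDown(); // the NodeCreated events seen by each watcher above
          }
        });
        // Registering a watch on a znode that does not exist yet: exists() returns
        // null, but the NodeCreated event still fires once a master registers itself.
        zk.exists("/hbase/master", true);
        masterCreated.await();
        zk.close();
      }
    }
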
2024-12-07T17:46:12,212 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bd53b59592b3,38409,1733593571919 2024-12-07T17:46:12,218 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/hbase.id] with ID: 87eedd31-cc0e-4d73-8464-9345f6f41299 2024-12-07T17:46:12,218 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/.tmp/hbase.id 2024-12-07T17:46:12,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741826_1002 (size=42) 2024-12-07T17:46:12,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741826_1002 (size=42) 2024-12-07T17:46:12,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741826_1002 (size=42) 2024-12-07T17:46:12,228 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/.tmp/hbase.id]:[hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/hbase.id] 2024-12-07T17:46:12,244 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:46:12,245 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T17:46:12,246 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
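The cluster ID lines above show a write-to-temp-then-rename pattern: the ID is first written to .tmp/hbase.id and only then moved to hbase.id, so readers never observe a partially written file. A minimal sketch of that idea with the Hadoop FileSystem API, using the cluster ID from this run and a placeholder root directory (HBase's actual FSUtils helper does more than this):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // fs.defaultFS would point at the test NameNode, e.g. hdfs://localhost:34695
        FileSystem fs = FileSystem.get(conf);

        Path target = new Path("/user/jenkins/test-data/rootdir/hbase.id"); // placeholder rootdir
        Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());

        // 1. Write the ID to a temporary file first ...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("87eedd31-cc0e-4d73-8464-9345f6f41299".getBytes(StandardCharsets.UTF_8));
        }
        // 2. ... then move it into place so the target only ever appears fully written.
        if (!fs.rename(tmp, target)) {
          throw new IOException("rename failed: " + tmp + " -> " + target);
        }
      }
    }
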
2024-12-07T17:46:12,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741827_1003 (size=196) 2024-12-07T17:46:12,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741827_1003 (size=196) 2024-12-07T17:46:12,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741827_1003 (size=196) 2024-12-07T17:46:12,264 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:46:12,266 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T17:46:12,266 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T17:46:12,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is 
added to blk_1073741828_1004 (size=1189) 2024-12-07T17:46:12,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741828_1004 (size=1189) 2024-12-07T17:46:12,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741828_1004 (size=1189) 2024-12-07T17:46:12,280 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store 2024-12-07T17:46:12,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741829_1005 (size=34) 2024-12-07T17:46:12,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741829_1005 (size=34) 2024-12-07T17:46:12,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741829_1005 (size=34) 2024-12-07T17:46:12,291 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:46:12,291 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T17:46:12,291 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:12,291 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T17:46:12,291 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T17:46:12,291 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:12,291 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:12,291 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733593572291Disabling compacts and flushes for region at 1733593572291Disabling writes for close at 1733593572291Writing region close event to WAL at 1733593572291Closed at 1733593572291 2024-12-07T17:46:12,292 WARN [master/bd53b59592b3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/.initializing 2024-12-07T17:46:12,292 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/WALs/bd53b59592b3,38409,1733593571919 2024-12-07T17:46:12,296 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bd53b59592b3%2C38409%2C1733593571919, suffix=, logDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/WALs/bd53b59592b3,38409,1733593571919, archiveDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/oldWALs, maxLogs=10 2024-12-07T17:46:12,297 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bd53b59592b3%2C38409%2C1733593571919.1733593572296 2024-12-07T17:46:12,306 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/WALs/bd53b59592b3,38409,1733593571919/bd53b59592b3%2C38409%2C1733593571919.1733593572296 2024-12-07T17:46:12,310 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40177:40177),(127.0.0.1/127.0.0.1:34183:34183),(127.0.0.1/127.0.0.1:44005:44005)] 2024-12-07T17:46:12,311 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T17:46:12,311 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:46:12,311 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:12,311 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:12,313 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:12,314 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T17:46:12,315 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:12,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:12,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:12,317 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T17:46:12,317 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:12,318 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:46:12,318 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:12,320 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T17:46:12,320 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:12,321 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:46:12,321 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:12,323 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T17:46:12,323 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:12,324 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:46:12,324 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:12,325 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:12,326 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:12,327 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:12,327 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:12,328 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T17:46:12,329 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:46:12,332 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:46:12,332 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67816060, jitterRate=0.010538041591644287}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T17:46:12,333 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733593572311Initializing all the Stores at 1733593572312 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593572312Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733593572313 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733593572313Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733593572313Cleaning up temporary data from old regions at 1733593572327 (+14 ms)Region opened successfully at 1733593572333 (+6 ms) 2024-12-07T17:46:12,334 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T17:46:12,339 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7426507c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bd53b59592b3/172.17.0.2:0 2024-12-07T17:46:12,340 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T17:46:12,340 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T17:46:12,340 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T17:46:12,341 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T17:46:12,341 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T17:46:12,342 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T17:46:12,342 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T17:46:12,346 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-07T17:46:12,347 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T17:46:12,376 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T17:46:12,376 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T17:46:12,377 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T17:46:12,387 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T17:46:12,387 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T17:46:12,388 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T17:46:12,395 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T17:46:12,396 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T17:46:12,403 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T17:46:12,405 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T17:46:12,412 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T17:46:12,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:12,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:12,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:12,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-07T17:46:12,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,421 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bd53b59592b3,38409,1733593571919, sessionid=0x100017060d60000, setting cluster-up flag (Was=false) 2024-12-07T17:46:12,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,462 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T17:46:12,464 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bd53b59592b3,38409,1733593571919 2024-12-07T17:46:12,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:12,504 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T17:46:12,506 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bd53b59592b3,38409,1733593571919 2024-12-07T17:46:12,509 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T17:46:12,514 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T17:46:12,514 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T17:46:12,515 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-07T17:46:12,515 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bd53b59592b3,38409,1733593571919 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T17:46:12,518 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bd53b59592b3:0, corePoolSize=5, maxPoolSize=5 2024-12-07T17:46:12,518 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bd53b59592b3:0, corePoolSize=5, maxPoolSize=5 2024-12-07T17:46:12,518 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bd53b59592b3:0, corePoolSize=5, maxPoolSize=5 2024-12-07T17:46:12,518 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bd53b59592b3:0, corePoolSize=5, maxPoolSize=5 2024-12-07T17:46:12,518 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bd53b59592b3:0, corePoolSize=10, maxPoolSize=10 2024-12-07T17:46:12,518 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,518 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bd53b59592b3:0, corePoolSize=2, maxPoolSize=2 2024-12-07T17:46:12,518 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,520 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733593602519 2024-12-07T17:46:12,520 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T17:46:12,520 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T17:46:12,520 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T17:46:12,520 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T17:46:12,520 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T17:46:12,520 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T17:46:12,520 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,521 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T17:46:12,521 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T17:46:12,523 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:12,523 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T17:46:12,523 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T17:46:12,523 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T17:46:12,523 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T17:46:12,523 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T17:46:12,523 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T17:46:12,524 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bd53b59592b3:0:becomeActiveMaster-HFileCleaner.large.0-1733593572523,5,FailOnTimeoutGroup] 2024-12-07T17:46:12,525 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/bd53b59592b3:0:becomeActiveMaster-HFileCleaner.small.0-1733593572524,5,FailOnTimeoutGroup] 2024-12-07T17:46:12,525 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,525 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T17:46:12,525 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,525 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741831_1007 (size=1321) 2024-12-07T17:46:12,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741831_1007 (size=1321) 2024-12-07T17:46:12,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741831_1007 (size=1321) 2024-12-07T17:46:12,534 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T17:46:12,534 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab 2024-12-07T17:46:12,542 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741832_1008 (size=32) 2024-12-07T17:46:12,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741832_1008 (size=32) 2024-12-07T17:46:12,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741832_1008 (size=32) 2024-12-07T17:46:12,543 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:46:12,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T17:46:12,546 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T17:46:12,546 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:12,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:12,547 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T17:46:12,548 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T17:46:12,548 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-07T17:46:12,549 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:12,549 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T17:46:12,550 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T17:46:12,550 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:12,551 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:12,551 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T17:46:12,552 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T17:46:12,552 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:12,553 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:12,553 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T17:46:12,553 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740 2024-12-07T17:46:12,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740 2024-12-07T17:46:12,555 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T17:46:12,555 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T17:46:12,556 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T17:46:12,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T17:46:12,559 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:46:12,560 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61013326, jitterRate=-0.09083059430122375}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T17:46:12,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733593572543Initializing all the Stores at 1733593572544 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593572544Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593572544Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733593572544Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593572544Cleaning up temporary data from old regions at 1733593572555 (+11 ms)Region opened successfully at 1733593572560 (+5 ms) 2024-12-07T17:46:12,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
2024-12-07T17:46:12,560 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T17:46:12,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T17:46:12,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T17:46:12,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T17:46:12,561 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T17:46:12,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733593572560Disabling compacts and flushes for region at 1733593572560Disabling writes for close at 1733593572561 (+1 ms)Writing region close event to WAL at 1733593572561Closed at 1733593572561 2024-12-07T17:46:12,562 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T17:46:12,562 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T17:46:12,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T17:46:12,564 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T17:46:12,565 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T17:46:12,578 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.HRegionServer(746): ClusterId : 87eedd31-cc0e-4d73-8464-9345f6f41299 2024-12-07T17:46:12,578 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(746): ClusterId : 87eedd31-cc0e-4d73-8464-9345f6f41299 2024-12-07T17:46:12,578 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(746): ClusterId : 87eedd31-cc0e-4d73-8464-9345f6f41299 2024-12-07T17:46:12,578 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T17:46:12,578 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T17:46:12,578 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T17:46:12,593 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T17:46:12,593 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T17:46:12,594 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T17:46:12,594 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 
2024-12-07T17:46:12,594 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T17:46:12,594 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T17:46:12,613 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T17:46:12,613 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T17:46:12,613 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T17:46:12,614 DEBUG [RS:1;bd53b59592b3:41571 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f1decaa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bd53b59592b3/172.17.0.2:0 2024-12-07T17:46:12,614 DEBUG [RS:0;bd53b59592b3:33337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54ca8090, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bd53b59592b3/172.17.0.2:0 2024-12-07T17:46:12,614 DEBUG [RS:2;bd53b59592b3:35059 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44a04224, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bd53b59592b3/172.17.0.2:0 2024-12-07T17:46:12,627 DEBUG [RS:2;bd53b59592b3:35059 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;bd53b59592b3:35059 2024-12-07T17:46:12,627 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T17:46:12,627 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T17:46:12,627 DEBUG [RS:2;bd53b59592b3:35059 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-07T17:46:12,628 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.HRegionServer(2659): reportForDuty to master=bd53b59592b3,38409,1733593571919 with port=35059, startcode=1733593572156 2024-12-07T17:46:12,629 DEBUG [RS:2;bd53b59592b3:35059 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T17:46:12,631 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60665, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T17:46:12,631 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38409 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bd53b59592b3,35059,1733593572156 2024-12-07T17:46:12,631 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38409 {}] master.ServerManager(517): Registering regionserver=bd53b59592b3,35059,1733593572156 2024-12-07T17:46:12,633 DEBUG [RS:0;bd53b59592b3:33337 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bd53b59592b3:33337 2024-12-07T17:46:12,633 DEBUG [RS:1;bd53b59592b3:41571 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;bd53b59592b3:41571 2024-12-07T17:46:12,633 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T17:46:12,633 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T17:46:12,633 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T17:46:12,633 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T17:46:12,633 DEBUG [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T17:46:12,633 DEBUG [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-07T17:46:12,633 DEBUG [RS:2;bd53b59592b3:35059 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab 2024-12-07T17:46:12,634 DEBUG [RS:2;bd53b59592b3:35059 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34695 2024-12-07T17:46:12,634 DEBUG [RS:2;bd53b59592b3:35059 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T17:46:12,634 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(2659): reportForDuty to master=bd53b59592b3,38409,1733593571919 with port=33337, startcode=1733593572089 2024-12-07T17:46:12,634 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(2659): reportForDuty to master=bd53b59592b3,38409,1733593571919 with port=41571, startcode=1733593572121 2024-12-07T17:46:12,634 DEBUG [RS:0;bd53b59592b3:33337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T17:46:12,634 DEBUG [RS:1;bd53b59592b3:41571 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T17:46:12,636 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44985, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T17:46:12,636 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58453, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T17:46:12,636 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38409 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bd53b59592b3,41571,1733593572121 2024-12-07T17:46:12,637 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38409 {}] master.ServerManager(517): Registering regionserver=bd53b59592b3,41571,1733593572121 2024-12-07T17:46:12,638 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38409 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bd53b59592b3,33337,1733593572089 2024-12-07T17:46:12,638 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38409 {}] master.ServerManager(517): Registering regionserver=bd53b59592b3,33337,1733593572089 2024-12-07T17:46:12,639 DEBUG [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab 2024-12-07T17:46:12,639 DEBUG [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34695 2024-12-07T17:46:12,639 DEBUG [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T17:46:12,640 DEBUG [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab 2024-12-07T17:46:12,641 DEBUG [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34695 2024-12-07T17:46:12,641 DEBUG [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T17:46:12,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T17:46:12,673 DEBUG [RS:2;bd53b59592b3:35059 {}] zookeeper.ZKUtil(111): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bd53b59592b3,35059,1733593572156 2024-12-07T17:46:12,673 WARN [RS:2;bd53b59592b3:35059 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T17:46:12,673 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bd53b59592b3,41571,1733593572121] 2024-12-07T17:46:12,673 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bd53b59592b3,35059,1733593572156] 2024-12-07T17:46:12,673 INFO [RS:2;bd53b59592b3:35059 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T17:46:12,673 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bd53b59592b3,33337,1733593572089] 2024-12-07T17:46:12,673 DEBUG [RS:2;bd53b59592b3:35059 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/WALs/bd53b59592b3,35059,1733593572156 2024-12-07T17:46:12,674 DEBUG [RS:1;bd53b59592b3:41571 {}] zookeeper.ZKUtil(111): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bd53b59592b3,41571,1733593572121 2024-12-07T17:46:12,674 DEBUG [RS:0;bd53b59592b3:33337 {}] zookeeper.ZKUtil(111): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bd53b59592b3,33337,1733593572089 2024-12-07T17:46:12,674 WARN [RS:0;bd53b59592b3:33337 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T17:46:12,674 WARN [RS:1;bd53b59592b3:41571 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
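Note: the three ZNodeClearer warnings above mean HBASE_ZNODE_FILE was not exported for these test servers, so the start scripts have no record of each server's ephemeral znode to delete after a crash; recovery then waits for the ZooKeeper session timeout (the "Longer MTTR" remark). Below is a minimal sketch of the condition behind the warning; it is illustrative only, the real logic lives in org.apache.hadoop.hbase.ZNodeClearer.

    public class ZnodeFileCheck {
      public static void main(String[] args) {
        // Plain JDK check mirroring the warning above (not HBase's actual code path).
        String znodeFile = System.getenv("HBASE_ZNODE_FILE");
        if (znodeFile == null) {
          // Started outside the usual start scripts (as in this mini-cluster test), so the
          // ephemeral znode can only disappear when the ZooKeeper session expires.
          System.out.println("HBASE_ZNODE_FILE not set; relying on ZooKeeper session expiry");
        }
      }
    }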
2024-12-07T17:46:12,674 INFO [RS:0;bd53b59592b3:33337 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T17:46:12,674 INFO [RS:1;bd53b59592b3:41571 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T17:46:12,674 DEBUG [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/WALs/bd53b59592b3,33337,1733593572089 2024-12-07T17:46:12,674 DEBUG [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/WALs/bd53b59592b3,41571,1733593572121 2024-12-07T17:46:12,681 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T17:46:12,681 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T17:46:12,681 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T17:46:12,682 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T17:46:12,683 INFO [RS:2;bd53b59592b3:35059 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T17:46:12,683 INFO [RS:2;bd53b59592b3:35059 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,684 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T17:46:12,684 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T17:46:12,687 INFO [RS:2;bd53b59592b3:35059 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T17:46:12,687 INFO [RS:2;bd53b59592b3:35059 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
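Note: each server's WALFactory above instantiates FSHLogProvider and writes its WAL under .../WALs/<server name>. A hedged configuration sketch of the provider switch (assumption: the hbase.wal.provider property as documented for HBase 2.x; this test simply runs with the filesystem default):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderConfig {
      public static Configuration walConf() {
        Configuration conf = HBaseConfiguration.create();
        // "filesystem" selects FSHLogProvider (logged above); "asyncfs" selects the async WAL provider.
        conf.set("hbase.wal.provider", "filesystem");
        return conf;
      }
    }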
2024-12-07T17:46:12,687 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,687 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,687 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,687 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,687 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,688 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bd53b59592b3:0, corePoolSize=2, maxPoolSize=2 2024-12-07T17:46:12,688 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,688 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,688 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,688 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,688 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,688 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,688 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bd53b59592b3:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:46:12,688 DEBUG [RS:2;bd53b59592b3:35059 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:46:12,689 INFO [RS:0;bd53b59592b3:33337 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T17:46:12,689 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T17:46:12,689 INFO [RS:0;bd53b59592b3:33337 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-07T17:46:12,690 INFO [RS:1;bd53b59592b3:41571 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T17:46:12,690 INFO [RS:1;bd53b59592b3:41571 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,690 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T17:46:12,690 INFO [RS:2;bd53b59592b3:35059 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,690 INFO [RS:2;bd53b59592b3:35059 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,690 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T17:46:12,690 INFO [RS:2;bd53b59592b3:35059 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,690 INFO [RS:2;bd53b59592b3:35059 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,690 INFO [RS:2;bd53b59592b3:35059 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,690 INFO [RS:2;bd53b59592b3:35059 {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,35059,1733593572156-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T17:46:12,691 INFO [RS:1;bd53b59592b3:41571 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T17:46:12,691 INFO [RS:0;bd53b59592b3:33337 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T17:46:12,691 INFO [RS:0;bd53b59592b3:33337 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,691 INFO [RS:1;bd53b59592b3:41571 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
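Note: the MemStoreFlusher and PressureAwareCompactionThroughputController lines above reflect heap-derived defaults for this test JVM (880 M global memstore limit with an 836 M low-water mark, and 50-100 MB/s compaction throughput retuned every 60000 ms). A hedged sketch of the knobs behind those numbers (assumption: property names as given in the HBase reference guide; the values are examples, not something this test sets explicitly):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndCompactionTuning {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap shared by all memstores; the logged
        // globalMemStoreLimit / LowMark pair is derived from these two fractions.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Bounds (bytes/second) used by PressureAwareCompactionThroughputController.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }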
2024-12-07T17:46:12,691 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,691 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,691 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,691 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,691 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,691 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,691 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,691 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,691 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,691 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bd53b59592b3:0, corePoolSize=2, maxPoolSize=2 2024-12-07T17:46:12,692 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,692 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,692 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,692 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bd53b59592b3:0, corePoolSize=2, maxPoolSize=2 2024-12-07T17:46:12,692 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,692 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,692 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,692 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bd53b59592b3:0, 
corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,692 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,692 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,692 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,692 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bd53b59592b3:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:46:12,692 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,692 DEBUG [RS:0;bd53b59592b3:33337 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:46:12,692 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,692 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bd53b59592b3:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:46:12,692 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bd53b59592b3:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:46:12,692 DEBUG [RS:1;bd53b59592b3:41571 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:46:12,692 INFO [RS:0;bd53b59592b3:33337 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,692 INFO [RS:0;bd53b59592b3:33337 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,693 INFO [RS:0;bd53b59592b3:33337 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,693 INFO [RS:0;bd53b59592b3:33337 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,693 INFO [RS:0;bd53b59592b3:33337 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,693 INFO [RS:0;bd53b59592b3:33337 {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,33337,1733593572089-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T17:46:12,693 INFO [RS:1;bd53b59592b3:41571 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,693 INFO [RS:1;bd53b59592b3:41571 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
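Note: the RS_* executor services above are small fixed-size thread pools, one per region server event type (open/close region, log replay, snapshot and flush operations, and so on), with the logged corePoolSize/maxPoolSize pairs. The sketch below is a plain java.util.concurrent analogy of those pool shapes, not HBase's internal executor.ExecutorService class:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class RsPoolShapes {
      public static void main(String[] args) {
        // corePoolSize=1, maxPoolSize=1: a single worker with a queue, e.g. RS_OPEN_REGION above.
        ExecutorService openRegion = Executors.newFixedThreadPool(1);
        // corePoolSize=3, maxPoolSize=3: e.g. RS_SNAPSHOT_OPERATIONS / RS_FLUSH_OPERATIONS above.
        ExecutorService snapshotOps = Executors.newFixedThreadPool(3);
        openRegion.submit(() -> System.out.println("open-region style task"));
        snapshotOps.submit(() -> System.out.println("snapshot style task"));
        openRegion.shutdown();
        snapshotOps.shutdown();
      }
    }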
2024-12-07T17:46:12,693 INFO [RS:1;bd53b59592b3:41571 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,693 INFO [RS:1;bd53b59592b3:41571 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,693 INFO [RS:1;bd53b59592b3:41571 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,693 INFO [RS:1;bd53b59592b3:41571 {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,41571,1733593572121-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T17:46:12,702 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T17:46:12,704 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T17:46:12,705 INFO [RS:0;bd53b59592b3:33337 {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,33337,1733593572089-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,705 INFO [RS:0;bd53b59592b3:33337 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,705 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.Replication(171): bd53b59592b3,33337,1733593572089 started 2024-12-07T17:46:12,709 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T17:46:12,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T17:46:12,709 INFO [RS:2;bd53b59592b3:35059 {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,35059,1733593572156-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,709 INFO [RS:2;bd53b59592b3:35059 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,709 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.Replication(171): bd53b59592b3,35059,1733593572156 started 2024-12-07T17:46:12,713 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T17:46:12,713 INFO [RS:1;bd53b59592b3:41571 {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,41571,1733593572121-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,713 INFO [RS:1;bd53b59592b3:41571 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:12,714 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.Replication(171): bd53b59592b3,41571,1733593572121 started 2024-12-07T17:46:12,716 WARN [bd53b59592b3:38409 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T17:46:12,718 INFO [RS:0;bd53b59592b3:33337 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T17:46:12,718 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(1482): Serving as bd53b59592b3,33337,1733593572089, RpcServer on bd53b59592b3/172.17.0.2:33337, sessionid=0x100017060d60001 2024-12-07T17:46:12,718 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T17:46:12,718 DEBUG [RS:0;bd53b59592b3:33337 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bd53b59592b3,33337,1733593572089 2024-12-07T17:46:12,718 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bd53b59592b3,33337,1733593572089' 2024-12-07T17:46:12,718 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T17:46:12,719 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T17:46:12,719 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T17:46:12,719 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T17:46:12,719 DEBUG [RS:0;bd53b59592b3:33337 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bd53b59592b3,33337,1733593572089 2024-12-07T17:46:12,719 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bd53b59592b3,33337,1733593572089' 2024-12-07T17:46:12,719 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T17:46:12,720 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T17:46:12,720 DEBUG [RS:0;bd53b59592b3:33337 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T17:46:12,720 INFO [RS:0;bd53b59592b3:33337 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T17:46:12,720 INFO [RS:0;bd53b59592b3:33337 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T17:46:12,728 INFO [RS:2;bd53b59592b3:35059 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
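Note: "Quota support disabled" above means neither RPC nor space quotas are active, so RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager stand down. A hedged sketch of the switch (assumption: hbase.quota.enabled is the relevant property and defaults to false, which matches what these servers log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitch {
      public static Configuration withQuotas() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true); // default false, hence the "disabled" lines above
        return conf;
      }
    }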
2024-12-07T17:46:12,728 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.HRegionServer(1482): Serving as bd53b59592b3,35059,1733593572156, RpcServer on bd53b59592b3/172.17.0.2:35059, sessionid=0x100017060d60003 2024-12-07T17:46:12,729 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T17:46:12,729 DEBUG [RS:2;bd53b59592b3:35059 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bd53b59592b3,35059,1733593572156 2024-12-07T17:46:12,729 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bd53b59592b3,35059,1733593572156' 2024-12-07T17:46:12,729 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T17:46:12,729 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T17:46:12,730 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T17:46:12,730 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T17:46:12,730 DEBUG [RS:2;bd53b59592b3:35059 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bd53b59592b3,35059,1733593572156 2024-12-07T17:46:12,730 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bd53b59592b3,35059,1733593572156' 2024-12-07T17:46:12,730 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T17:46:12,730 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T17:46:12,731 DEBUG [RS:2;bd53b59592b3:35059 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T17:46:12,731 INFO [RS:2;bd53b59592b3:35059 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T17:46:12,731 INFO [RS:2;bd53b59592b3:35059 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T17:46:12,732 INFO [RS:1;bd53b59592b3:41571 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T17:46:12,732 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(1482): Serving as bd53b59592b3,41571,1733593572121, RpcServer on bd53b59592b3/172.17.0.2:41571, sessionid=0x100017060d60002 2024-12-07T17:46:12,732 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T17:46:12,732 DEBUG [RS:1;bd53b59592b3:41571 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bd53b59592b3,41571,1733593572121 2024-12-07T17:46:12,732 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bd53b59592b3,41571,1733593572121' 2024-12-07T17:46:12,732 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T17:46:12,732 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T17:46:12,733 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T17:46:12,733 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T17:46:12,733 DEBUG [RS:1;bd53b59592b3:41571 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bd53b59592b3,41571,1733593572121 2024-12-07T17:46:12,733 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bd53b59592b3,41571,1733593572121' 2024-12-07T17:46:12,733 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T17:46:12,733 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T17:46:12,734 DEBUG [RS:1;bd53b59592b3:41571 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T17:46:12,734 INFO [RS:1;bd53b59592b3:41571 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T17:46:12,734 INFO [RS:1;bd53b59592b3:41571 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T17:46:12,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T17:46:12,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T17:46:12,824 INFO [RS:0;bd53b59592b3:33337 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bd53b59592b3%2C33337%2C1733593572089, suffix=, logDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/WALs/bd53b59592b3,33337,1733593572089, archiveDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/oldWALs, maxLogs=32 2024-12-07T17:46:12,827 INFO [RS:0;bd53b59592b3:33337 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bd53b59592b3%2C33337%2C1733593572089.1733593572827 2024-12-07T17:46:12,833 INFO [RS:2;bd53b59592b3:35059 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bd53b59592b3%2C35059%2C1733593572156, suffix=, logDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/WALs/bd53b59592b3,35059,1733593572156, archiveDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/oldWALs, maxLogs=32 2024-12-07T17:46:12,835 INFO [RS:2;bd53b59592b3:35059 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bd53b59592b3%2C35059%2C1733593572156.1733593572834 2024-12-07T17:46:12,836 INFO [RS:0;bd53b59592b3:33337 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/WALs/bd53b59592b3,33337,1733593572089/bd53b59592b3%2C33337%2C1733593572089.1733593572827 2024-12-07T17:46:12,836 INFO [RS:1;bd53b59592b3:41571 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bd53b59592b3%2C41571%2C1733593572121, suffix=, logDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/WALs/bd53b59592b3,41571,1733593572121, archiveDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/oldWALs, maxLogs=32 2024-12-07T17:46:12,837 INFO [RS:1;bd53b59592b3:41571 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bd53b59592b3%2C41571%2C1733593572121.1733593572837 2024-12-07T17:46:12,845 DEBUG [RS:0;bd53b59592b3:33337 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40177:40177),(127.0.0.1/127.0.0.1:34183:34183),(127.0.0.1/127.0.0.1:44005:44005)] 2024-12-07T17:46:12,851 INFO [RS:2;bd53b59592b3:35059 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/WALs/bd53b59592b3,35059,1733593572156/bd53b59592b3%2C35059%2C1733593572156.1733593572834 2024-12-07T17:46:12,851 INFO [RS:1;bd53b59592b3:41571 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/WALs/bd53b59592b3,41571,1733593572121/bd53b59592b3%2C41571%2C1733593572121.1733593572837 2024-12-07T17:46:12,852 DEBUG [RS:2;bd53b59592b3:35059 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44005:44005),(127.0.0.1/127.0.0.1:40177:40177),(127.0.0.1/127.0.0.1:34183:34183)] 2024-12-07T17:46:12,852 DEBUG [RS:1;bd53b59592b3:41571 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34183:34183),(127.0.0.1/127.0.0.1:40177:40177),(127.0.0.1/127.0.0.1:44005:44005)] 2024-12-07T17:46:12,966 DEBUG [bd53b59592b3:38409 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-07T17:46:12,966 DEBUG [bd53b59592b3:38409 {}] balancer.BalancerClusterState(204): Hosts are {bd53b59592b3=0} 
racks are {/default-rack=0} 2024-12-07T17:46:12,969 DEBUG [bd53b59592b3:38409 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T17:46:12,969 DEBUG [bd53b59592b3:38409 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T17:46:12,969 DEBUG [bd53b59592b3:38409 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T17:46:12,969 DEBUG [bd53b59592b3:38409 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T17:46:12,969 DEBUG [bd53b59592b3:38409 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T17:46:12,969 DEBUG [bd53b59592b3:38409 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T17:46:12,969 INFO [bd53b59592b3:38409 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T17:46:12,969 INFO [bd53b59592b3:38409 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T17:46:12,969 INFO [bd53b59592b3:38409 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T17:46:12,970 DEBUG [bd53b59592b3:38409 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:46:12,970 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bd53b59592b3,33337,1733593572089 2024-12-07T17:46:12,972 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bd53b59592b3,33337,1733593572089, state=OPENING 2024-12-07T17:46:12,984 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T17:46:13,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:13,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:13,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:13,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:13,002 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T17:46:13,002 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:13,002 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:13,002 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:13,002 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:13,002 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bd53b59592b3,33337,1733593572089}] 2024-12-07T17:46:13,158 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T17:46:13,162 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43025, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T17:46:13,169 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T17:46:13,169 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T17:46:13,171 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bd53b59592b3%2C33337%2C1733593572089.meta, suffix=.meta, logDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/WALs/bd53b59592b3,33337,1733593572089, archiveDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/oldWALs, maxLogs=32 2024-12-07T17:46:13,172 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bd53b59592b3%2C33337%2C1733593572089.meta.1733593573172.meta 2024-12-07T17:46:13,179 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/WALs/bd53b59592b3,33337,1733593572089/bd53b59592b3%2C33337%2C1733593572089.meta.1733593573172.meta 2024-12-07T17:46:13,187 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40177:40177),(127.0.0.1/127.0.0.1:34183:34183),(127.0.0.1/127.0.0.1:44005:44005)] 2024-12-07T17:46:13,190 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T17:46:13,191 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T17:46:13,191 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T17:46:13,191 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
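Note: pid=3 above opens hbase:meta,,1.1588230740 on bd53b59592b3,33337 and loads the MultiRowMutationEndpoint coprocessor from the table descriptor. Once meta is online it can be read through the normal client API; the sketch below is illustrative (class and method names are made up, and the Connection is assumed to be built as in the earlier sketch):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class MetaScanSketch {
      public static void dumpMeta(Connection conn) throws IOException {
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(new Scan())) {
          for (Result row : scanner) {
            System.out.println(row); // one row per region of every user and system table
          }
        }
      }
    }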
2024-12-07T17:46:13,191 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T17:46:13,191 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:46:13,191 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T17:46:13,191 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T17:46:13,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T17:46:13,195 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T17:46:13,195 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:13,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:13,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T17:46:13,196 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T17:46:13,197 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:13,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:13,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T17:46:13,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T17:46:13,198 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:13,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:46:13,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T17:46:13,200 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T17:46:13,200 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:13,201 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
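Note: the StoreOpener lines above build one HStore per hbase:meta column family (info, ns, rep_barrier, table), each with ROW_INDEX_V1 block encoding and the compaction settings echoed by CompactionConfiguration(183). The sketch below shows how a comparable family could be declared on an ordinary table with the HBase 2.x descriptor builders; the table name "example" and family "cf" are invented for illustration:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ExampleSchema {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))            // hypothetical table
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")) // hypothetical family
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // encoding logged for the meta stores above
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            .build();
      }
    }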
2024-12-07T17:46:13,201 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T17:46:13,202 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740 2024-12-07T17:46:13,203 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740 2024-12-07T17:46:13,205 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T17:46:13,205 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T17:46:13,205 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T17:46:13,207 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T17:46:13,208 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59800846, jitterRate=-0.10889795422554016}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T17:46:13,208 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T17:46:13,209 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733593573192Writing region info on filesystem at 1733593573192Initializing all the Stores at 1733593573193 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593573193Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593573193Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733593573193Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733593573193Cleaning up temporary data from old regions at 1733593573205 (+12 ms)Running coprocessor post-open hooks at 1733593573208 (+3 ms)Region opened successfully at 1733593573209 (+1 ms) 2024-12-07T17:46:13,211 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733593573157 2024-12-07T17:46:13,214 DEBUG [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T17:46:13,215 INFO [RS_OPEN_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T17:46:13,215 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bd53b59592b3,33337,1733593572089 2024-12-07T17:46:13,217 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bd53b59592b3,33337,1733593572089, state=OPEN 2024-12-07T17:46:13,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T17:46:13,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T17:46:13,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T17:46:13,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T17:46:13,270 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bd53b59592b3,33337,1733593572089 2024-12-07T17:46:13,270 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:13,270 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:13,270 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:13,270 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:46:13,276 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T17:46:13,276 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bd53b59592b3,33337,1733593572089 in 268 msec 2024-12-07T17:46:13,281 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T17:46:13,281 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 714 msec 2024-12-07T17:46:13,283 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T17:46:13,283 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T17:46:13,285 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T17:46:13,285 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bd53b59592b3,33337,1733593572089, seqNum=-1] 2024-12-07T17:46:13,285 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:46:13,287 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40245, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:46:13,298 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 783 msec 2024-12-07T17:46:13,298 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733593573298, completionTime=-1 2024-12-07T17:46:13,298 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-07T17:46:13,298 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
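The meta location written to /hbase/meta-region-server above is the same one PEWorker-2 then fetches ("The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bd53b59592b3,33337,...]"). Through the public client API that lookup would look roughly like the sketch below; the configuration setup, class and variable names are assumptions, only the expected server name comes from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    final class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml points at this cluster
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Locate the single hbase:meta region via the region locator.
          HRegionLocation loc = conn.getRegionLocator(TableName.META_TABLE_NAME)
              .getRegionLocation(Bytes.toBytes(""));
          System.out.println(loc.getServerName()); // bd53b59592b3,33337,1733593572089 in this run
        }
      }
    }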
2024-12-07T17:46:13,301 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-07T17:46:13,301 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733593633301 2024-12-07T17:46:13,301 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733593693301 2024-12-07T17:46:13,301 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-07T17:46:13,302 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-07T17:46:13,302 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,38409,1733593571919-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:13,303 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,38409,1733593571919-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:13,303 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,38409,1733593571919-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:13,303 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bd53b59592b3:38409, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:13,303 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:13,303 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:13,307 DEBUG [master/bd53b59592b3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T17:46:13,309 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.097sec 2024-12-07T17:46:13,310 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T17:46:13,310 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T17:46:13,310 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T17:46:13,310 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-07T17:46:13,310 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T17:46:13,310 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,38409,1733593571919-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T17:46:13,310 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,38409,1733593571919-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T17:46:13,313 DEBUG [master/bd53b59592b3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T17:46:13,314 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T17:46:13,314 INFO [master/bd53b59592b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bd53b59592b3,38409,1733593571919-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:46:13,378 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70fbe73e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:46:13,378 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bd53b59592b3,38409,-1 for getting cluster id 2024-12-07T17:46:13,379 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T17:46:13,380 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '87eedd31-cc0e-4d73-8464-9345f6f41299' 2024-12-07T17:46:13,381 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T17:46:13,381 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "87eedd31-cc0e-4d73-8464-9345f6f41299" 2024-12-07T17:46:13,381 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36e1772e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:46:13,381 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bd53b59592b3,38409,-1] 2024-12-07T17:46:13,382 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T17:46:13,382 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:13,383 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43350, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T17:46:13,385 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f40f134, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:46:13,385 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T17:46:13,386 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bd53b59592b3,33337,1733593572089, seqNum=-1] 2024-12-07T17:46:13,386 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:46:13,388 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52564, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:46:13,391 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bd53b59592b3,38409,1733593571919 2024-12-07T17:46:13,391 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T17:46:13,392 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is bd53b59592b3,38409,1733593571919 2024-12-07T17:46:13,393 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2005a384 2024-12-07T17:46:13,393 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T17:46:13,395 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43352, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T17:46:13,396 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38409 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:46:13,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38409 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-07T17:46:13,399 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:46:13,400 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:13,400 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38409 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-07T17:46:13,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T17:46:13,401 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:46:13,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741837_1013 (size=392) 2024-12-07T17:46:13,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741837_1013 (size=392) 2024-12-07T17:46:13,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741837_1013 (size=392) 2024-12-07T17:46:13,416 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 07b60f984595c622eb414c571fb6d57b, NAME => 'TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab 2024-12-07T17:46:13,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741838_1014 (size=51) 2024-12-07T17:46:13,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741838_1014 (size=51) 2024-12-07T17:46:13,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741838_1014 (size=51) 2024-12-07T17:46:13,428 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:46:13,428 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 07b60f984595c622eb414c571fb6d57b, disabling compactions & flushes 2024-12-07T17:46:13,428 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 2024-12-07T17:46:13,428 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 2024-12-07T17:46:13,428 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. after waiting 0 ms 2024-12-07T17:46:13,428 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 2024-12-07T17:46:13,428 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 
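The create request logged by master.HMaster$4 ("create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', VERSIONS => '1', ...}") corresponds roughly to an Admin call like the one below. Table name, family name, REGION_REPLICATION, VERSIONS and BLOCKSIZE are taken from the log; the helper class, method and variable names are assumptions, not the actual TestHBaseWalOnEC source.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CreateTestTableSketch {
      // Builds a descriptor matching the attributes printed above and submits it.
      static void createTable(Connection conn) throws IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
            .setRegionReplication(1)                                     // REGION_REPLICATION => '1'
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)                                       // VERSIONS => '1'
                .setBlocksize(65536)                                     // BLOCKSIZE => '65536 B (64KB)'
                .build())
            .build();
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(td); // returns once the CreateTableProcedure (pid=4 here) completes
        }
      }
    }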
2024-12-07T17:46:13,428 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 07b60f984595c622eb414c571fb6d57b: Waiting for close lock at 1733593573428Disabling compacts and flushes for region at 1733593573428Disabling writes for close at 1733593573428Writing region close event to WAL at 1733593573428Closed at 1733593573428 2024-12-07T17:46:13,430 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:46:13,430 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733593573430"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733593573430"}]},"ts":"1733593573430"} 2024-12-07T17:46:13,434 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-07T17:46:13,435 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:46:13,436 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733593573435"}]},"ts":"1733593573435"} 2024-12-07T17:46:13,439 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-07T17:46:13,439 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {bd53b59592b3=0} racks are {/default-rack=0} 2024-12-07T17:46:13,440 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T17:46:13,440 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T17:46:13,440 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T17:46:13,440 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T17:46:13,440 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T17:46:13,440 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T17:46:13,440 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T17:46:13,440 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T17:46:13,440 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T17:46:13,440 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:46:13,440 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=07b60f984595c622eb414c571fb6d57b, ASSIGN}] 2024-12-07T17:46:13,442 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=07b60f984595c622eb414c571fb6d57b, ASSIGN 2024-12-07T17:46:13,444 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=07b60f984595c622eb414c571fb6d57b, ASSIGN; state=OFFLINE, location=bd53b59592b3,41571,1733593572121; forceNewPlan=false, retain=false 2024-12-07T17:46:13,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T17:46:13,594 INFO [bd53b59592b3:38409 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-07T17:46:13,595 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=07b60f984595c622eb414c571fb6d57b, regionState=OPENING, regionLocation=bd53b59592b3,41571,1733593572121 2024-12-07T17:46:13,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=07b60f984595c622eb414c571fb6d57b, ASSIGN because future has completed 2024-12-07T17:46:13,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 07b60f984595c622eb414c571fb6d57b, server=bd53b59592b3,41571,1733593572121}] 2024-12-07T17:46:13,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T17:46:13,754 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T17:46:13,756 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45267, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T17:46:13,761 INFO [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 
2024-12-07T17:46:13,761 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 07b60f984595c622eb414c571fb6d57b, NAME => 'TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b.', STARTKEY => '', ENDKEY => ''} 2024-12-07T17:46:13,762 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 07b60f984595c622eb414c571fb6d57b 2024-12-07T17:46:13,762 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:46:13,762 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 07b60f984595c622eb414c571fb6d57b 2024-12-07T17:46:13,762 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 07b60f984595c622eb414c571fb6d57b 2024-12-07T17:46:13,764 INFO [StoreOpener-07b60f984595c622eb414c571fb6d57b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 07b60f984595c622eb414c571fb6d57b 2024-12-07T17:46:13,766 INFO [StoreOpener-07b60f984595c622eb414c571fb6d57b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 07b60f984595c622eb414c571fb6d57b columnFamilyName cf 2024-12-07T17:46:13,766 DEBUG [StoreOpener-07b60f984595c622eb414c571fb6d57b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:46:13,767 INFO [StoreOpener-07b60f984595c622eb414c571fb6d57b-1 {}] regionserver.HStore(327): Store=07b60f984595c622eb414c571fb6d57b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:46:13,767 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 07b60f984595c622eb414c571fb6d57b 2024-12-07T17:46:13,768 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/default/TestHBaseWalOnEC/07b60f984595c622eb414c571fb6d57b 2024-12-07T17:46:13,769 DEBUG 
[RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/default/TestHBaseWalOnEC/07b60f984595c622eb414c571fb6d57b 2024-12-07T17:46:13,769 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 07b60f984595c622eb414c571fb6d57b 2024-12-07T17:46:13,769 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 07b60f984595c622eb414c571fb6d57b 2024-12-07T17:46:13,771 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 07b60f984595c622eb414c571fb6d57b 2024-12-07T17:46:13,774 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/default/TestHBaseWalOnEC/07b60f984595c622eb414c571fb6d57b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:46:13,776 INFO [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 07b60f984595c622eb414c571fb6d57b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62989064, jitterRate=-0.061389803886413574}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:46:13,776 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 07b60f984595c622eb414c571fb6d57b 2024-12-07T17:46:13,776 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 07b60f984595c622eb414c571fb6d57b: Running coprocessor pre-open hook at 1733593573762Writing region info on filesystem at 1733593573762Initializing all the Stores at 1733593573763 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733593573763Cleaning up temporary data from old regions at 1733593573769 (+6 ms)Running coprocessor post-open hooks at 1733593573776 (+7 ms)Region opened successfully at 1733593573776 2024-12-07T17:46:13,778 INFO [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b., pid=6, masterSystemTime=1733593573754 2024-12-07T17:46:13,781 DEBUG [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 2024-12-07T17:46:13,781 INFO [RS_OPEN_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 
2024-12-07T17:46:13,782 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=07b60f984595c622eb414c571fb6d57b, regionState=OPEN, openSeqNum=2, regionLocation=bd53b59592b3,41571,1733593572121 2024-12-07T17:46:13,785 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 07b60f984595c622eb414c571fb6d57b, server=bd53b59592b3,41571,1733593572121 because future has completed 2024-12-07T17:46:13,791 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T17:46:13,791 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 07b60f984595c622eb414c571fb6d57b, server=bd53b59592b3,41571,1733593572121 in 187 msec 2024-12-07T17:46:13,796 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T17:46:13,796 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=07b60f984595c622eb414c571fb6d57b, ASSIGN in 351 msec 2024-12-07T17:46:13,797 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:46:13,798 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733593573797"}]},"ts":"1733593573797"} 2024-12-07T17:46:13,801 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-07T17:46:13,803 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:46:13,806 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 407 msec 2024-12-07T17:46:14,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T17:46:14,028 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T17:46:14,028 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-07T17:46:14,029 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:46:14,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-07T17:46:14,034 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:46:14,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
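The HBaseTestingUtil(3046)/(3100)/(3120) messages above are the test utility's post-create assignment check. In test code that is typically a single call like the fragment below; UTIL is an assumed name for the HBaseTestingUtil instance that started this mini cluster.

    // Blocks until every region of TestHBaseWalOnEC is marked OPEN in hbase:meta and
    // in the master's AssignmentManager; "Timeout = 60000ms" above is the utility's default wait.
    UTIL.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"));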
2024-12-07T17:46:14,040 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b., hostname=bd53b59592b3,41571,1733593572121, seqNum=2] 2024-12-07T17:46:14,041 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:46:14,043 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58974, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:46:14,047 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38409 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-07T17:46:14,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38409 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-07T17:46:14,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T17:46:14,050 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-07T17:46:14,052 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T17:46:14,052 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T17:46:14,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T17:46:14,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41571 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-07T17:46:14,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 
2024-12-07T17:46:14,208 INFO [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 07b60f984595c622eb414c571fb6d57b 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-07T17:46:14,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/default/TestHBaseWalOnEC/07b60f984595c622eb414c571fb6d57b/.tmp/cf/adddb08ba7a24dfcaadaf54494ad42a0 is 36, key is row/cf:cq/1733593574044/Put/seqid=0 2024-12-07T17:46:14,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741839_1015 (size=4787) 2024-12-07T17:46:14,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741839_1015 (size=4787) 2024-12-07T17:46:14,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741839_1015 (size=4787) 2024-12-07T17:46:14,232 INFO [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/default/TestHBaseWalOnEC/07b60f984595c622eb414c571fb6d57b/.tmp/cf/adddb08ba7a24dfcaadaf54494ad42a0 2024-12-07T17:46:14,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/default/TestHBaseWalOnEC/07b60f984595c622eb414c571fb6d57b/.tmp/cf/adddb08ba7a24dfcaadaf54494ad42a0 as hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/default/TestHBaseWalOnEC/07b60f984595c622eb414c571fb6d57b/cf/adddb08ba7a24dfcaadaf54494ad42a0 2024-12-07T17:46:14,249 INFO [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/default/TestHBaseWalOnEC/07b60f984595c622eb414c571fb6d57b/cf/adddb08ba7a24dfcaadaf54494ad42a0, entries=1, sequenceid=5, filesize=4.7 K 2024-12-07T17:46:14,251 INFO [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 07b60f984595c622eb414c571fb6d57b in 43ms, sequenceid=5, compaction requested=false 2024-12-07T17:46:14,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 07b60f984595c622eb414c571fb6d57b: 2024-12-07T17:46:14,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 
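The flushed cell ("key is row/cf:cq/.../Put") together with the "flush TestHBaseWalOnEC" request from HMaster$22 implies a write-then-flush on the client side, roughly as sketched here. Only the table name, row, family and qualifier come from the log; the value bytes, helper class and variable names are assumptions.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class PutAndFlushSketch {
      static void putAndFlush(Connection conn) throws IOException {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(tn)) {
          // A single small cell: row 'row', family 'cf', qualifier 'cq' (value is assumed).
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
        }
        try (Admin admin = conn.getAdmin()) {
          admin.flush(tn); // drives the FlushTableProcedure / FlushRegionProcedure seen above
        }
      }
    }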
2024-12-07T17:46:14,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bd53b59592b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-07T17:46:14,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38409 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-07T17:46:14,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-07T17:46:14,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 201 msec 2024-12-07T17:46:14,259 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 210 msec 2024-12-07T17:46:14,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T17:46:14,368 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T17:46:14,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T17:46:14,373 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T17:46:14,374 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T17:46:14,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:14,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:14,374 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T17:46:14,374 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T17:46:14,374 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=777460738, stopped=false 2024-12-07T17:46:14,374 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bd53b59592b3,38409,1733593571919 2024-12-07T17:46:14,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:14,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:14,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:14,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T17:46:14,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:14,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, 
quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:14,437 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T17:46:14,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:14,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:14,438 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T17:46:14,438 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T17:46:14,439 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:14,439 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:14,439 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:14,439 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:14,439 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:46:14,440 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bd53b59592b3,33337,1733593572089' ***** 2024-12-07T17:46:14,440 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T17:46:14,440 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bd53b59592b3,41571,1733593572121' ***** 2024-12-07T17:46:14,440 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T17:46:14,440 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bd53b59592b3,35059,1733593572156' ***** 2024-12-07T17:46:14,440 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T17:46:14,441 INFO [RS:0;bd53b59592b3:33337 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T17:46:14,441 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T17:46:14,441 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T17:46:14,441 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T17:46:14,441 INFO [RS:0;bd53b59592b3:33337 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
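The RunAfters frames in the call stacks above bottom out in TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101), i.e. a per-test JUnit after-hook that shuts the mini cluster down. Its body is not shown in the log; a plausible sketch, with UTIL again the assumed HBaseTestingUtil field:

    import org.junit.After;

    @After
    public void tearDown() throws Exception {
      // Closes the shared connection and stops master and region servers,
      // producing the "STOPPING region server" messages that follow.
      UTIL.shutdownMiniCluster();
    }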
2024-12-07T17:46:14,441 INFO [RS:1;bd53b59592b3:41571 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T17:46:14,441 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(959): stopping server bd53b59592b3,33337,1733593572089 2024-12-07T17:46:14,441 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T17:46:14,441 INFO [RS:0;bd53b59592b3:33337 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T17:46:14,441 INFO [RS:1;bd53b59592b3:41571 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T17:46:14,441 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T17:46:14,441 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(3091): Received CLOSE for 07b60f984595c622eb414c571fb6d57b 2024-12-07T17:46:14,441 INFO [RS:0;bd53b59592b3:33337 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bd53b59592b3:33337. 2024-12-07T17:46:14,441 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T17:46:14,441 INFO [RS:2;bd53b59592b3:35059 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T17:46:14,442 INFO [RS:2;bd53b59592b3:35059 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T17:46:14,442 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.HRegionServer(959): stopping server bd53b59592b3,35059,1733593572156 2024-12-07T17:46:14,442 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(959): stopping server bd53b59592b3,41571,1733593572121 2024-12-07T17:46:14,442 DEBUG [RS:0;bd53b59592b3:33337 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T17:46:14,442 INFO [RS:2;bd53b59592b3:35059 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T17:46:14,442 INFO [RS:1;bd53b59592b3:41571 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T17:46:14,442 DEBUG [RS:0;bd53b59592b3:33337 {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-12-07T17:46:14,442 INFO [RS:2;bd53b59592b3:35059 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;bd53b59592b3:35059. 2024-12-07T17:46:14,442 INFO [RS:1;bd53b59592b3:41571 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;bd53b59592b3:41571. 2024-12-07T17:46:14,442 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T17:46:14,442 DEBUG [RS:2;bd53b59592b3:35059 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T17:46:14,442 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 07b60f984595c622eb414c571fb6d57b, disabling compactions & flushes 2024-12-07T17:46:14,442 DEBUG [RS:2;bd53b59592b3:35059 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:14,442 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T17:46:14,442 INFO [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 2024-12-07T17:46:14,442 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T17:46:14,442 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.HRegionServer(976): stopping server bd53b59592b3,35059,1733593572156; all regions closed. 2024-12-07T17:46:14,442 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 
2024-12-07T17:46:14,442 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T17:46:14,442 DEBUG [RS:1;bd53b59592b3:41571 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T17:46:14,442 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. after waiting 0 ms 2024-12-07T17:46:14,443 DEBUG [RS:1;bd53b59592b3:41571 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:14,443 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 
2024-12-07T17:46:14,443 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T17:46:14,443 DEBUG [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(1325): Online Regions={07b60f984595c622eb414c571fb6d57b=TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b.} 2024-12-07T17:46:14,443 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,443 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T17:46:14,443 DEBUG [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(1351): Waiting on 07b60f984595c622eb414c571fb6d57b 2024-12-07T17:46:14,443 DEBUG [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-07T17:46:14,443 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,443 DEBUG [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T17:46:14,443 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T17:46:14,443 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,443 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T17:46:14,443 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,443 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T17:46:14,443 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T17:46:14,443 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,443 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T17:46:14,444 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-07T17:46:14,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741834_1010 (size=93) 2024-12-07T17:46:14,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741834_1010 (size=93) 2024-12-07T17:46:14,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741834_1010 (size=93) 2024-12-07T17:46:14,449 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/default/TestHBaseWalOnEC/07b60f984595c622eb414c571fb6d57b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T17:46:14,450 DEBUG [RS:2;bd53b59592b3:35059 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/oldWALs 2024-12-07T17:46:14,450 INFO [RS:2;bd53b59592b3:35059 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 
bd53b59592b3%2C35059%2C1733593572156:(num 1733593572834) 2024-12-07T17:46:14,450 DEBUG [RS:2;bd53b59592b3:35059 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:14,450 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:46:14,450 INFO [RS:2;bd53b59592b3:35059 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T17:46:14,450 INFO [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 2024-12-07T17:46:14,450 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 07b60f984595c622eb414c571fb6d57b: Waiting for close lock at 1733593574442Running coprocessor pre-close hooks at 1733593574442Disabling compacts and flushes for region at 1733593574442Disabling writes for close at 1733593574443 (+1 ms)Writing region close event to WAL at 1733593574444 (+1 ms)Running coprocessor post-close hooks at 1733593574450 (+6 ms)Closed at 1733593574450 2024-12-07T17:46:14,450 INFO [RS:2;bd53b59592b3:35059 {}] hbase.ChoreService(370): Chore service for: regionserver/bd53b59592b3:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T17:46:14,450 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T17:46:14,450 DEBUG [RS_CLOSE_REGION-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b. 2024-12-07T17:46:14,450 INFO [regionserver/bd53b59592b3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T17:46:14,450 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T17:46:14,450 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-07T17:46:14,450 INFO [RS:2;bd53b59592b3:35059 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T17:46:14,451 INFO [RS:2;bd53b59592b3:35059 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35059 2024-12-07T17:46:14,460 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/.tmp/info/a8c10c2d07134152a33c5655f823d1cc is 153, key is TestHBaseWalOnEC,,1733593573395.07b60f984595c622eb414c571fb6d57b./info:regioninfo/1733593573782/Put/seqid=0 2024-12-07T17:46:14,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bd53b59592b3,35059,1733593572156 2024-12-07T17:46:14,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T17:46:14,461 INFO [RS:2;bd53b59592b3:35059 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T17:46:14,462 WARN [IPC Server handler 3 on default port 34695 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:46:14,462 WARN [IPC Server handler 3 on default port 34695 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:46:14,462 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bd53b59592b3,35059,1733593572156] 2024-12-07T17:46:14,462 WARN [IPC Server handler 3 on default port 34695 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:46:14,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741840_1016 (size=6637) 2024-12-07T17:46:14,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741840_1016 (size=6637) 2024-12-07T17:46:14,467 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/.tmp/info/a8c10c2d07134152a33c5655f823d1cc 2024-12-07T17:46:14,478 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bd53b59592b3,35059,1733593572156 already deleted, retry=false 2024-12-07T17:46:14,478 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bd53b59592b3,35059,1733593572156 expired; onlineServers=2 2024-12-07T17:46:14,491 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/.tmp/ns/3fbdc74ec8134b49a82834f857376413 is 43, key is default/ns:d/1733593573288/Put/seqid=0 2024-12-07T17:46:14,492 INFO [regionserver/bd53b59592b3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:46:14,492 WARN [IPC Server handler 4 on default port 34695 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:46:14,492 WARN [IPC Server handler 4 on default port 34695 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:46:14,493 WARN [IPC Server handler 4 on default port 34695 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:46:14,496 INFO [regionserver/bd53b59592b3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:46:14,496 INFO [regionserver/bd53b59592b3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:46:14,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741841_1017 (size=5153) 2024-12-07T17:46:14,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741841_1017 (size=5153) 2024-12-07T17:46:14,498 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/.tmp/ns/3fbdc74ec8134b49a82834f857376413 2024-12-07T17:46:14,518 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/.tmp/table/4c8f7fa9e39d4a51aec5994f1d62f077 is 52, key is TestHBaseWalOnEC/table:state/1733593573797/Put/seqid=0 2024-12-07T17:46:14,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741842_1018 (size=5249) 2024-12-07T17:46:14,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741842_1018 (size=5249) 2024-12-07T17:46:14,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741842_1018 (size=5249) 2024-12-07T17:46:14,525 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/.tmp/table/4c8f7fa9e39d4a51aec5994f1d62f077 2024-12-07T17:46:14,533 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/.tmp/info/a8c10c2d07134152a33c5655f823d1cc as hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/info/a8c10c2d07134152a33c5655f823d1cc 2024-12-07T17:46:14,541 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/info/a8c10c2d07134152a33c5655f823d1cc, entries=10, sequenceid=11, filesize=6.5 K 2024-12-07T17:46:14,542 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/.tmp/ns/3fbdc74ec8134b49a82834f857376413 as hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/ns/3fbdc74ec8134b49a82834f857376413 2024-12-07T17:46:14,549 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/ns/3fbdc74ec8134b49a82834f857376413, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T17:46:14,551 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/.tmp/table/4c8f7fa9e39d4a51aec5994f1d62f077 as hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/table/4c8f7fa9e39d4a51aec5994f1d62f077 2024-12-07T17:46:14,559 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/table/4c8f7fa9e39d4a51aec5994f1d62f077, entries=2, sequenceid=11, filesize=5.1 K 2024-12-07T17:46:14,560 INFO 
[RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 117ms, sequenceid=11, compaction requested=false 2024-12-07T17:46:14,566 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T17:46:14,567 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T17:46:14,567 INFO [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T17:46:14,567 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733593574443Running coprocessor pre-close hooks at 1733593574443Disabling compacts and flushes for region at 1733593574443Disabling writes for close at 1733593574443Obtaining lock to block concurrent updates at 1733593574444 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733593574444Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733593574444Flushing stores of hbase:meta,,1.1588230740 at 1733593574445 (+1 ms)Flushing 1588230740/info: creating writer at 1733593574445Flushing 1588230740/info: appending metadata at 1733593574460 (+15 ms)Flushing 1588230740/info: closing flushed file at 1733593574460Flushing 1588230740/ns: creating writer at 1733593574475 (+15 ms)Flushing 1588230740/ns: appending metadata at 1733593574490 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733593574491 (+1 ms)Flushing 1588230740/table: creating writer at 1733593574505 (+14 ms)Flushing 1588230740/table: appending metadata at 1733593574518 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733593574518Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11f73b98: reopening flushed file at 1733593574531 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47963dc7: reopening flushed file at 1733593574541 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38dc5bbe: reopening flushed file at 1733593574549 (+8 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 117ms, sequenceid=11, compaction requested=false at 1733593574560 (+11 ms)Writing region close event to WAL at 1733593574562 (+2 ms)Running coprocessor post-close hooks at 1733593574567 (+5 ms)Closed at 1733593574567 2024-12-07T17:46:14,567 DEBUG [RS_CLOSE_META-regionserver/bd53b59592b3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T17:46:14,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:14,570 INFO [RS:2;bd53b59592b3:35059 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T17:46:14,570 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:35059-0x100017060d60003, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:14,570 INFO [RS:2;bd53b59592b3:35059 {}] regionserver.HRegionServer(1031): Exiting; stopping=bd53b59592b3,35059,1733593572156; zookeeper connection closed. 2024-12-07T17:46:14,570 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7cb83fe0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7cb83fe0 2024-12-07T17:46:14,643 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(976): stopping server bd53b59592b3,41571,1733593572121; all regions closed. 2024-12-07T17:46:14,643 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(976): stopping server bd53b59592b3,33337,1733593572089; all regions closed. 2024-12-07T17:46:14,644 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,644 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,644 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,644 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,644 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,644 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,644 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,644 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,644 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,644 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741836_1012 (size=2751) 2024-12-07T17:46:14,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741835_1011 (size=1298) 2024-12-07T17:46:14,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741835_1011 (size=1298) 2024-12-07T17:46:14,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741836_1012 (size=2751) 2024-12-07T17:46:14,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741835_1011 (size=1298) 2024-12-07T17:46:14,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741836_1012 (size=2751) 2024-12-07T17:46:14,650 DEBUG [RS:0;bd53b59592b3:33337 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/oldWALs 2024-12-07T17:46:14,650 INFO [RS:0;bd53b59592b3:33337 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bd53b59592b3%2C33337%2C1733593572089.meta:.meta(num 1733593573172) 2024-12-07T17:46:14,650 DEBUG [RS:1;bd53b59592b3:41571 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/oldWALs 2024-12-07T17:46:14,651 INFO [RS:1;bd53b59592b3:41571 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bd53b59592b3%2C41571%2C1733593572121:(num 1733593572837) 2024-12-07T17:46:14,651 DEBUG [RS:1;bd53b59592b3:41571 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-07T17:46:14,651 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:46:14,651 INFO [RS:1;bd53b59592b3:41571 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T17:46:14,651 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,651 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,651 INFO [RS:1;bd53b59592b3:41571 {}] hbase.ChoreService(370): Chore service for: regionserver/bd53b59592b3:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T17:46:14,651 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,651 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T17:46:14,651 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T17:46:14,651 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,651 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T17:46:14,651 INFO [RS:1;bd53b59592b3:41571 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T17:46:14,651 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,652 INFO [regionserver/bd53b59592b3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T17:46:14,652 INFO [RS:1;bd53b59592b3:41571 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41571 2024-12-07T17:46:14,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741833_1009 (size=93) 2024-12-07T17:46:14,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741833_1009 (size=93) 2024-12-07T17:46:14,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741833_1009 (size=93) 2024-12-07T17:46:14,657 DEBUG [RS:0;bd53b59592b3:33337 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/oldWALs 2024-12-07T17:46:14,657 INFO [RS:0;bd53b59592b3:33337 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bd53b59592b3%2C33337%2C1733593572089:(num 1733593572827) 2024-12-07T17:46:14,657 DEBUG [RS:0;bd53b59592b3:33337 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:46:14,657 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:46:14,657 INFO [RS:0;bd53b59592b3:33337 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T17:46:14,657 INFO [RS:0;bd53b59592b3:33337 {}] hbase.ChoreService(370): Chore service for: regionserver/bd53b59592b3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T17:46:14,657 INFO [RS:0;bd53b59592b3:33337 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T17:46:14,657 INFO [regionserver/bd53b59592b3:0.logRoller {}] 
wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T17:46:14,657 INFO [RS:0;bd53b59592b3:33337 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33337 2024-12-07T17:46:14,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bd53b59592b3,41571,1733593572121 2024-12-07T17:46:14,686 INFO [RS:1;bd53b59592b3:41571 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T17:46:14,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T17:46:14,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bd53b59592b3,33337,1733593572089 2024-12-07T17:46:14,695 INFO [RS:0;bd53b59592b3:33337 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T17:46:14,703 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bd53b59592b3,41571,1733593572121] 2024-12-07T17:46:14,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T17:46:14,714 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-07T17:46:14,720 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bd53b59592b3,41571,1733593572121 already deleted, retry=false 2024-12-07T17:46:14,720 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bd53b59592b3,41571,1733593572121 expired; onlineServers=1 2024-12-07T17:46:14,720 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bd53b59592b3,33337,1733593572089] 2024-12-07T17:46:14,728 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bd53b59592b3,33337,1733593572089 already deleted, retry=false 2024-12-07T17:46:14,728 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bd53b59592b3,33337,1733593572089 expired; onlineServers=0 2024-12-07T17:46:14,728 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bd53b59592b3,38409,1733593571919' ***** 2024-12-07T17:46:14,728 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T17:46:14,728 INFO [M:0;bd53b59592b3:38409 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T17:46:14,728 INFO [M:0;bd53b59592b3:38409 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T17:46:14,728 DEBUG [M:0;bd53b59592b3:38409 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T17:46:14,728 DEBUG [M:0;bd53b59592b3:38409 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T17:46:14,728 DEBUG [master/bd53b59592b3:0:becomeActiveMaster-HFileCleaner.small.0-1733593572524 {}] cleaner.HFileCleaner(306): Exit 
Thread[master/bd53b59592b3:0:becomeActiveMaster-HFileCleaner.small.0-1733593572524,5,FailOnTimeoutGroup] 2024-12-07T17:46:14,728 DEBUG [master/bd53b59592b3:0:becomeActiveMaster-HFileCleaner.large.0-1733593572523 {}] cleaner.HFileCleaner(306): Exit Thread[master/bd53b59592b3:0:becomeActiveMaster-HFileCleaner.large.0-1733593572523,5,FailOnTimeoutGroup] 2024-12-07T17:46:14,729 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-07T17:46:14,729 INFO [M:0;bd53b59592b3:38409 {}] hbase.ChoreService(370): Chore service for: master/bd53b59592b3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T17:46:14,729 INFO [M:0;bd53b59592b3:38409 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T17:46:14,729 DEBUG [M:0;bd53b59592b3:38409 {}] master.HMaster(1795): Stopping service threads 2024-12-07T17:46:14,729 INFO [M:0;bd53b59592b3:38409 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T17:46:14,729 INFO [M:0;bd53b59592b3:38409 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T17:46:14,729 INFO [M:0;bd53b59592b3:38409 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T17:46:14,730 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T17:46:14,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T17:46:14,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:46:14,736 DEBUG [M:0;bd53b59592b3:38409 {}] zookeeper.ZKUtil(347): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T17:46:14,736 WARN [M:0;bd53b59592b3:38409 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T17:46:14,737 INFO [M:0;bd53b59592b3:38409 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/.lastflushedseqids 2024-12-07T17:46:14,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741843_1019 (size=127) 2024-12-07T17:46:14,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741843_1019 (size=127) 2024-12-07T17:46:14,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741843_1019 (size=127) 2024-12-07T17:46:14,748 INFO [M:0;bd53b59592b3:38409 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T17:46:14,748 INFO [M:0;bd53b59592b3:38409 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T17:46:14,748 DEBUG 
[M:0;bd53b59592b3:38409 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T17:46:14,748 INFO [M:0;bd53b59592b3:38409 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:14,748 DEBUG [M:0;bd53b59592b3:38409 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:14,748 DEBUG [M:0;bd53b59592b3:38409 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T17:46:14,748 DEBUG [M:0;bd53b59592b3:38409 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:14,748 INFO [M:0;bd53b59592b3:38409 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-07T17:46:14,766 DEBUG [M:0;bd53b59592b3:38409 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2d8c0fd811514e32ae7fbbbe869a5119 is 82, key is hbase:meta,,1/info:regioninfo/1733593573215/Put/seqid=0 2024-12-07T17:46:14,768 WARN [IPC Server handler 1 on default port 34695 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:46:14,768 WARN [IPC Server handler 1 on default port 34695 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:46:14,768 WARN [IPC Server handler 1 on default port 34695 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:46:14,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741844_1020 (size=5672) 2024-12-07T17:46:14,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741844_1020 (size=5672) 2024-12-07T17:46:14,774 INFO [M:0;bd53b59592b3:38409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2d8c0fd811514e32ae7fbbbe869a5119 2024-12-07T17:46:14,795 DEBUG [M:0;bd53b59592b3:38409 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6cabdd8697e8409f9c829a23702cbfb2 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733593573805/Put/seqid=0 2024-12-07T17:46:14,796 WARN [IPC Server handler 3 on default port 34695 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:46:14,796 WARN [IPC Server handler 3 on default port 34695 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:46:14,796 WARN [IPC Server handler 3 on default port 34695 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:46:14,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741845_1021 (size=6438) 2024-12-07T17:46:14,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741845_1021 (size=6438) 2024-12-07T17:46:14,801 INFO [M:0;bd53b59592b3:38409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6cabdd8697e8409f9c829a23702cbfb2 2024-12-07T17:46:14,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:14,803 INFO [RS:1;bd53b59592b3:41571 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T17:46:14,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41571-0x100017060d60002, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:14,803 INFO [RS:1;bd53b59592b3:41571 {}] regionserver.HRegionServer(1031): Exiting; stopping=bd53b59592b3,41571,1733593572121; zookeeper connection closed. 
2024-12-07T17:46:14,804 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@29f16ad3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@29f16ad3 2024-12-07T17:46:14,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:14,812 INFO [RS:0;bd53b59592b3:33337 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T17:46:14,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33337-0x100017060d60001, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:14,812 INFO [RS:0;bd53b59592b3:33337 {}] regionserver.HRegionServer(1031): Exiting; stopping=bd53b59592b3,33337,1733593572089; zookeeper connection closed. 2024-12-07T17:46:14,812 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@192a2791 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@192a2791 2024-12-07T17:46:14,812 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-07T17:46:14,822 DEBUG [M:0;bd53b59592b3:38409 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b57efa6311ab4ac2b44cbc4148c83d9f is 69, key is bd53b59592b3,33337,1733593572089/rs:state/1733593572639/Put/seqid=0 2024-12-07T17:46:14,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741846_1022 (size=5294) 2024-12-07T17:46:14,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741846_1022 (size=5294) 2024-12-07T17:46:14,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741846_1022 (size=5294) 2024-12-07T17:46:14,829 INFO [M:0;bd53b59592b3:38409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b57efa6311ab4ac2b44cbc4148c83d9f 2024-12-07T17:46:14,835 DEBUG [M:0;bd53b59592b3:38409 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2d8c0fd811514e32ae7fbbbe869a5119 as hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2d8c0fd811514e32ae7fbbbe869a5119 2024-12-07T17:46:14,842 INFO [M:0;bd53b59592b3:38409 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2d8c0fd811514e32ae7fbbbe869a5119, entries=8, sequenceid=72, filesize=5.5 K 2024-12-07T17:46:14,844 DEBUG [M:0;bd53b59592b3:38409 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6cabdd8697e8409f9c829a23702cbfb2 as hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6cabdd8697e8409f9c829a23702cbfb2 2024-12-07T17:46:14,851 INFO [M:0;bd53b59592b3:38409 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6cabdd8697e8409f9c829a23702cbfb2, entries=8, sequenceid=72, filesize=6.3 K 2024-12-07T17:46:14,852 DEBUG [M:0;bd53b59592b3:38409 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b57efa6311ab4ac2b44cbc4148c83d9f as hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b57efa6311ab4ac2b44cbc4148c83d9f 2024-12-07T17:46:14,860 INFO [M:0;bd53b59592b3:38409 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34695/user/jenkins/test-data/8f6135a2-6946-11c3-f0e0-bb31dc5677ab/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b57efa6311ab4ac2b44cbc4148c83d9f, entries=3, sequenceid=72, filesize=5.2 K 2024-12-07T17:46:14,861 INFO [M:0;bd53b59592b3:38409 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=72, compaction requested=false 2024-12-07T17:46:14,863 INFO [M:0;bd53b59592b3:38409 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:46:14,863 DEBUG [M:0;bd53b59592b3:38409 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733593574748Disabling compacts and flushes for region at 1733593574748Disabling writes for close at 1733593574748Obtaining lock to block concurrent updates at 1733593574748Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733593574748Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733593574749 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733593574750 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733593574750Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733593574766 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733593574766Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733593574780 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733593574794 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733593574794Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733593574807 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733593574822 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733593574822Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52e1aa32: reopening flushed file at 1733593574834 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@127f96cd: reopening flushed file at 1733593574843 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7161f408: reopening flushed file at 1733593574851 (+8 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=72, compaction requested=false at 1733593574862 (+11 ms)Writing region close event to WAL at 1733593574863 (+1 ms)Closed at 1733593574863 2024-12-07T17:46:14,863 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,863 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,863 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,864 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,864 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T17:46:14,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741830_1006 (size=32662) 2024-12-07T17:46:14,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34699 is added to blk_1073741830_1006 (size=32662) 2024-12-07T17:46:14,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741830_1006 (size=32662) 2024-12-07T17:46:14,867 INFO [M:0;bd53b59592b3:38409 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T17:46:14,867 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T17:46:14,867 INFO [M:0;bd53b59592b3:38409 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38409 2024-12-07T17:46:14,867 INFO [M:0;bd53b59592b3:38409 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T17:46:14,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:14,992 INFO [M:0;bd53b59592b3:38409 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T17:46:14,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38409-0x100017060d60000, quorum=127.0.0.1:49709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:46:14,998 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@42d862a7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:46:15,000 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e8938f2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T17:46:15,000 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T17:46:15,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19dff04d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T17:46:15,001 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@438bc7ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/hadoop.log.dir/,STOPPED} 2024-12-07T17:46:15,004 WARN [BP-1349556790-172.17.0.2-1733593569958 heartbeating to localhost/127.0.0.1:34695 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T17:46:15,004 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T17:46:15,004 WARN [BP-1349556790-172.17.0.2-1733593569958 heartbeating to localhost/127.0.0.1:34695 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1349556790-172.17.0.2-1733593569958 (Datanode Uuid 769d476f-880f-4ec7-a0d9-6c6db3372f84) service to localhost/127.0.0.1:34695 2024-12-07T17:46:15,004 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T17:46:15,004 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/data/data5/current/BP-1349556790-172.17.0.2-1733593569958 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T17:46:15,005 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/data/data6/current/BP-1349556790-172.17.0.2-1733593569958 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T17:46:15,005 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T17:46:15,006 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77b0dcb8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:46:15,007 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@63c7dc3a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T17:46:15,007 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T17:46:15,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1768a8c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T17:46:15,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b0441b5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/hadoop.log.dir/,STOPPED} 2024-12-07T17:46:15,008 WARN [BP-1349556790-172.17.0.2-1733593569958 heartbeating to localhost/127.0.0.1:34695 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T17:46:15,008 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T17:46:15,008 WARN [BP-1349556790-172.17.0.2-1733593569958 heartbeating to localhost/127.0.0.1:34695 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1349556790-172.17.0.2-1733593569958 (Datanode Uuid 296d6558-7640-42fa-9a2e-486ad4a715b1) service to localhost/127.0.0.1:34695
2024-12-07T17:46:15,008 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T17:46:15,009 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/data/data3/current/BP-1349556790-172.17.0.2-1733593569958 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T17:46:15,009 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/data/data4/current/BP-1349556790-172.17.0.2-1733593569958 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T17:46:15,009 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T17:46:15,011 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14e9278c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T17:46:15,011 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@18099ade{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T17:46:15,011 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T17:46:15,011 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d6118e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T17:46:15,011 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20a0e688{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/hadoop.log.dir/,STOPPED}
2024-12-07T17:46:15,012 WARN [BP-1349556790-172.17.0.2-1733593569958 heartbeating to localhost/127.0.0.1:34695 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T17:46:15,012 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T17:46:15,012 WARN [BP-1349556790-172.17.0.2-1733593569958 heartbeating to localhost/127.0.0.1:34695 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1349556790-172.17.0.2-1733593569958 (Datanode Uuid 920c9691-b090-467c-8575-67b717b45c6a) service to localhost/127.0.0.1:34695
2024-12-07T17:46:15,012 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T17:46:15,013 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/data/data1/current/BP-1349556790-172.17.0.2-1733593569958 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T17:46:15,013 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/cluster_91fa9a77-5529-28e6-d5bb-5b332c274060/data/data2/current/BP-1349556790-172.17.0.2-1733593569958 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T17:46:15,014 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T17:46:15,019 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@de17eef{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-07T17:46:15,019 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d329a96{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T17:46:15,019 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T17:46:15,020 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@686c9dd5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T17:46:15,020 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62802e4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3244666e-e0da-5ef4-db12-4bcfe7fb926c/hadoop.log.dir/,STOPPED}
2024-12-07T17:46:15,028 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-07T17:46:15,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-07T17:46:15,059 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=156 (was 93) - Thread LEAK? -, OpenFileDescriptor=518 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=151 (was 164), ProcessCount=11 (was 11), AvailableMemoryMB=19956 (was 20104)