2024-12-07 05:49:32,738 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-07 05:49:32,749 main DEBUG Took 0.008888 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-07 05:49:32,749 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-07 05:49:32,750 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-07 05:49:32,751 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-07 05:49:32,752 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 05:49:32,764 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-07 05:49:32,775 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,776 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 05:49:32,776 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,777 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 05:49:32,777 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,777 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 05:49:32,778 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,778 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 05:49:32,779 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,779 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 05:49:32,780 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,780 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 05:49:32,781 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,781 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-07 05:49:32,781 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,781 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 05:49:32,782 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,782 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 05:49:32,782 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,783 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 05:49:32,783 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,783 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 05:49:32,784 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,784 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 05:49:32,784 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,784 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-07 05:49:32,786 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 05:49:32,787 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-07 05:49:32,789 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-07 05:49:32,789 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-07 05:49:32,790 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-07 05:49:32,790 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-07 05:49:32,798 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-07 05:49:32,800 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-07 05:49:32,802 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-07 05:49:32,802 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-07 05:49:32,803 main DEBUG createAppenders(={Console}) 2024-12-07 05:49:32,803 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-07 05:49:32,804 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-07 05:49:32,804 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-07 05:49:32,804 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-07 05:49:32,805 main DEBUG OutputStream closed 2024-12-07 05:49:32,805 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-07 05:49:32,805 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-07 05:49:32,805 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-07 05:49:32,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-07 05:49:32,868 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-07 05:49:32,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-07 05:49:32,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-07 05:49:32,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-07 05:49:32,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-07 05:49:32,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-07 05:49:32,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-07 05:49:32,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-07 05:49:32,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-07 05:49:32,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-07 05:49:32,872 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-07 05:49:32,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-07 05:49:32,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-07 05:49:32,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-07 05:49:32,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-07 05:49:32,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-07 05:49:32,874 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-07 05:49:32,876 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-07 05:49:32,876 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-07 05:49:32,876 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-07 05:49:32,877 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-07T05:49:32,889 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-07 05:49:32,892 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-07 05:49:32,892 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
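The PropertiesConfiguration assembled above comes from the log4j2.properties inside the hbase-logging tests jar (the jar URI appears in the "Reconfiguration complete" message). A minimal properties-format sketch consistent with the builder output, where the property keys and logger prefixes are assumptions and only the values are taken from the log:

    rootLogger = INFO,Console

    appender.console.type = HBaseTestAppender
    appender.console.name = Console
    appender.console.target = SYSTEM_ERR
    appender.console.maxSize = 1G
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

    # one representative per-package level from the LoggerConfig builders above; the rest follow the same pattern
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR
    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG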
2024-12-07T05:49:33,100 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6 2024-12-07T05:49:33,121 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580, deleteOnExit=true 2024-12-07T05:49:33,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/test.cache.data in system properties and HBase conf 2024-12-07T05:49:33,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T05:49:33,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/hadoop.log.dir in system properties and HBase conf 2024-12-07T05:49:33,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T05:49:33,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T05:49:33,125 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T05:49:33,204 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-07T05:49:33,282 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T05:49:33,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T05:49:33,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T05:49:33,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T05:49:33,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T05:49:33,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T05:49:33,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T05:49:33,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T05:49:33,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T05:49:33,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T05:49:33,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/nfs.dump.dir in system properties and HBase conf 2024-12-07T05:49:33,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/java.io.tmpdir in system properties and HBase conf 2024-12-07T05:49:33,289 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T05:49:33,289 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T05:49:33,289 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T05:49:34,198 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-07T05:49:34,259 INFO [Time-limited test {}] log.Log(170): Logging initialized @2069ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-07T05:49:34,319 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T05:49:34,370 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T05:49:34,386 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T05:49:34,386 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T05:49:34,387 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T05:49:34,398 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T05:49:34,401 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4395d44b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/hadoop.log.dir/,AVAILABLE} 2024-12-07T05:49:34,402 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f93dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T05:49:34,565 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58dbf239{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/java.io.tmpdir/jetty-localhost-37635-hadoop-hdfs-3_4_1-tests_jar-_-any-9898633118813378847/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T05:49:34,577 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13e2962d{HTTP/1.1, (http/1.1)}{localhost:37635} 2024-12-07T05:49:34,577 INFO [Time-limited test {}] server.Server(415): Started @2389ms 2024-12-07T05:49:35,046 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T05:49:35,052 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T05:49:35,053 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T05:49:35,053 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T05:49:35,053 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T05:49:35,054 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@431e53b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/hadoop.log.dir/,AVAILABLE} 2024-12-07T05:49:35,054 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dc262e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T05:49:35,147 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65462677{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/java.io.tmpdir/jetty-localhost-44009-hadoop-hdfs-3_4_1-tests_jar-_-any-15534242683373333129/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T05:49:35,147 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@383014b{HTTP/1.1, (http/1.1)}{localhost:44009} 2024-12-07T05:49:35,147 INFO [Time-limited test {}] server.Server(415): Started @2959ms 2024-12-07T05:49:35,191 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T05:49:35,289 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T05:49:35,295 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T05:49:35,296 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T05:49:35,296 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T05:49:35,297 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T05:49:35,299 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@444b27d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/hadoop.log.dir/,AVAILABLE} 2024-12-07T05:49:35,300 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6af5a446{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T05:49:35,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@513cab2c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/java.io.tmpdir/jetty-localhost-41573-hadoop-hdfs-3_4_1-tests_jar-_-any-12135659624589317799/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T05:49:35,398 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29a123ec{HTTP/1.1, (http/1.1)}{localhost:41573} 2024-12-07T05:49:35,398 INFO [Time-limited test {}] server.Server(415): Started @3209ms 2024-12-07T05:49:35,400 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T05:49:35,432 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T05:49:35,436 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T05:49:35,437 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T05:49:35,438 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T05:49:35,438 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T05:49:35,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35e2f174{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/hadoop.log.dir/,AVAILABLE} 2024-12-07T05:49:35,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@343b36c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T05:49:35,540 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@653e6301{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/java.io.tmpdir/jetty-localhost-37087-hadoop-hdfs-3_4_1-tests_jar-_-any-16571779038125688829/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T05:49:35,541 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@404caff2{HTTP/1.1, (http/1.1)}{localhost:37087} 2024-12-07T05:49:35,541 INFO [Time-limited test {}] server.Server(415): Started @3352ms 2024-12-07T05:49:35,543 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
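Each Jetty server above logs the same AuthenticationFilter warning because the signature secret file it looks for (by default, hadoop.http.authentication.signature.secret.file points at ${user.home}/hadoop-http-auth-signature-secret, here /home/jenkins/hadoop-http-auth-signature-secret) does not exist, so the filter falls back to random per-process secrets. A sketch of how that file could be provided on the build host, assuming the default property value is left unchanged:

    # create the secret file the FileSignerSecretProvider is looking for, so the
    # filter no longer needs to fall back to random secrets
    head -c 64 /dev/urandom | base64 > /home/jenkins/hadoop-http-auth-signature-secret
    chmod 600 /home/jenkins/hadoop-http-auth-signature-secret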
2024-12-07T05:49:36,499 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/data/data2/current/BP-779431921-172.17.0.2-1733550573781/current, will proceed with Du for space computation calculation, 2024-12-07T05:49:36,499 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/data/data3/current/BP-779431921-172.17.0.2-1733550573781/current, will proceed with Du for space computation calculation, 2024-12-07T05:49:36,499 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/data/data4/current/BP-779431921-172.17.0.2-1733550573781/current, will proceed with Du for space computation calculation, 2024-12-07T05:49:36,499 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/data/data1/current/BP-779431921-172.17.0.2-1733550573781/current, will proceed with Du for space computation calculation, 2024-12-07T05:49:36,530 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T05:49:36,530 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T05:49:36,563 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/data/data6/current/BP-779431921-172.17.0.2-1733550573781/current, will proceed with Du for space computation calculation, 2024-12-07T05:49:36,563 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/data/data5/current/BP-779431921-172.17.0.2-1733550573781/current, will proceed with Du for space computation calculation, 2024-12-07T05:49:36,574 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4dec679668a71efe with lease ID 0xfe7e5a39de2087d7: Processing first storage report for DS-b522406b-6d23-43e3-bd9b-8ab3caee7af5 from datanode DatanodeRegistration(127.0.0.1:41373, datanodeUuid=f81be1d2-0b8b-4847-aa83-c251aa3bed14, infoPort=40679, infoSecurePort=0, ipcPort=38019, storageInfo=lv=-57;cid=testClusterID;nsid=868382139;c=1733550573781) 2024-12-07T05:49:36,576 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4dec679668a71efe with lease ID 0xfe7e5a39de2087d7: from storage DS-b522406b-6d23-43e3-bd9b-8ab3caee7af5 node DatanodeRegistration(127.0.0.1:41373, datanodeUuid=f81be1d2-0b8b-4847-aa83-c251aa3bed14, infoPort=40679, infoSecurePort=0, ipcPort=38019, storageInfo=lv=-57;cid=testClusterID;nsid=868382139;c=1733550573781), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T05:49:36,576 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e1063fbbef64c15 with lease ID 0xfe7e5a39de2087d6: Processing first storage report for DS-6f84a8da-97db-4dd8-838e-f14171bd83d4 from datanode DatanodeRegistration(127.0.0.1:44585, datanodeUuid=c2b4d2a4-956e-4cc5-a019-01bce9f9e722, infoPort=35653, infoSecurePort=0, ipcPort=33629, storageInfo=lv=-57;cid=testClusterID;nsid=868382139;c=1733550573781) 2024-12-07T05:49:36,576 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e1063fbbef64c15 with lease ID 0xfe7e5a39de2087d6: from storage DS-6f84a8da-97db-4dd8-838e-f14171bd83d4 node DatanodeRegistration(127.0.0.1:44585, datanodeUuid=c2b4d2a4-956e-4cc5-a019-01bce9f9e722, infoPort=35653, infoSecurePort=0, ipcPort=33629, storageInfo=lv=-57;cid=testClusterID;nsid=868382139;c=1733550573781), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T05:49:36,576 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4dec679668a71efe with lease ID 0xfe7e5a39de2087d7: Processing first storage report for DS-80baf0c8-1efb-48fc-8796-81d39f1141df from datanode DatanodeRegistration(127.0.0.1:41373, datanodeUuid=f81be1d2-0b8b-4847-aa83-c251aa3bed14, infoPort=40679, infoSecurePort=0, ipcPort=38019, storageInfo=lv=-57;cid=testClusterID;nsid=868382139;c=1733550573781) 2024-12-07T05:49:36,577 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4dec679668a71efe with lease ID 0xfe7e5a39de2087d7: from storage DS-80baf0c8-1efb-48fc-8796-81d39f1141df node DatanodeRegistration(127.0.0.1:41373, datanodeUuid=f81be1d2-0b8b-4847-aa83-c251aa3bed14, 
infoPort=40679, infoSecurePort=0, ipcPort=38019, storageInfo=lv=-57;cid=testClusterID;nsid=868382139;c=1733550573781), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T05:49:36,577 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e1063fbbef64c15 with lease ID 0xfe7e5a39de2087d6: Processing first storage report for DS-5890831b-658a-4ceb-a7a1-01909eab011e from datanode DatanodeRegistration(127.0.0.1:44585, datanodeUuid=c2b4d2a4-956e-4cc5-a019-01bce9f9e722, infoPort=35653, infoSecurePort=0, ipcPort=33629, storageInfo=lv=-57;cid=testClusterID;nsid=868382139;c=1733550573781) 2024-12-07T05:49:36,577 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e1063fbbef64c15 with lease ID 0xfe7e5a39de2087d6: from storage DS-5890831b-658a-4ceb-a7a1-01909eab011e node DatanodeRegistration(127.0.0.1:44585, datanodeUuid=c2b4d2a4-956e-4cc5-a019-01bce9f9e722, infoPort=35653, infoSecurePort=0, ipcPort=33629, storageInfo=lv=-57;cid=testClusterID;nsid=868382139;c=1733550573781), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T05:49:36,586 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T05:49:36,590 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x99aa44602ff4c0cf with lease ID 0xfe7e5a39de2087d8: Processing first storage report for DS-b92c1bad-8adc-4cd9-a414-17b9a9a798ed from datanode DatanodeRegistration(127.0.0.1:37163, datanodeUuid=46869fc1-8536-4da5-a0db-c9c97f51bb0f, infoPort=34967, infoSecurePort=0, ipcPort=44061, storageInfo=lv=-57;cid=testClusterID;nsid=868382139;c=1733550573781) 2024-12-07T05:49:36,590 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x99aa44602ff4c0cf with lease ID 0xfe7e5a39de2087d8: from storage DS-b92c1bad-8adc-4cd9-a414-17b9a9a798ed node DatanodeRegistration(127.0.0.1:37163, datanodeUuid=46869fc1-8536-4da5-a0db-c9c97f51bb0f, infoPort=34967, infoSecurePort=0, ipcPort=44061, storageInfo=lv=-57;cid=testClusterID;nsid=868382139;c=1733550573781), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T05:49:36,590 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x99aa44602ff4c0cf with lease ID 0xfe7e5a39de2087d8: Processing first storage report for DS-81d5ef8e-28c4-441e-9fa1-35c67f044251 from datanode DatanodeRegistration(127.0.0.1:37163, datanodeUuid=46869fc1-8536-4da5-a0db-c9c97f51bb0f, infoPort=34967, infoSecurePort=0, ipcPort=44061, storageInfo=lv=-57;cid=testClusterID;nsid=868382139;c=1733550573781) 2024-12-07T05:49:36,591 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x99aa44602ff4c0cf with lease ID 0xfe7e5a39de2087d8: from storage DS-81d5ef8e-28c4-441e-9fa1-35c67f044251 node DatanodeRegistration(127.0.0.1:37163, datanodeUuid=46869fc1-8536-4da5-a0db-c9c97f51bb0f, infoPort=34967, infoSecurePort=0, ipcPort=44061, storageInfo=lv=-57;cid=testClusterID;nsid=868382139;c=1733550573781), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T05:49:36,629 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6 2024-12-07T05:49:36,703 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable 2024-12-07T05:49:36,746 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=159, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=197, ProcessCount=11, AvailableMemoryMB=8490 2024-12-07T05:49:36,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T05:49:36,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-07T05:49:36,815 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/zookeeper_0, clientPort=52563, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T05:49:36,825 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52563 2024-12-07T05:49:36,833 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:36,836 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:36,931 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:36,931 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
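The two DFSStripedOutputStream warnings just above show that the RS-3-2-1024k policy could not place its parity blocks (indices 3 and 4): that policy needs 3 data plus 2 parity locations on distinct datanodes, while the StartMiniClusterOption above starts only numDataNodes=3. The warning itself names the diagnostic to run; assuming an hdfs client configured against this cluster, the check would look like:

    hdfs ec -verifyClusterSetup   # command quoted in the warning above; checks the topology against the enabled EC policies
    hdfs ec -listPolicies         # lists the erasure coding policies and their enabled/disabled state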
2024-12-07T05:49:36,968 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1395986970_22 at /127.0.0.1:51252 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:37163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51252 dst: /127.0.0.1:37163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T05:49:36,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-07T05:49:37,387 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T05:49:37,397 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed with version=8 2024-12-07T05:49:37,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/hbase-staging 2024-12-07T05:49:37,478 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-07T05:49:37,716 INFO [Time-limited test {}] client.ConnectionUtils(128): master/16b2ce7304d2:0 server-side Connection retries=45 2024-12-07T05:49:37,724 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:37,725 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:37,729 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T05:49:37,729 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:37,729 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T05:49:37,844 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T05:49:37,895 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-07T05:49:37,903 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-07T05:49:37,906 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T05:49:37,926 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 22692 (auto-detected) 2024-12-07T05:49:37,927 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-07T05:49:37,942 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34827 2024-12-07T05:49:37,959 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34827 connecting to ZooKeeper ensemble=127.0.0.1:52563 2024-12-07T05:49:38,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348270x0, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T05:49:38,045 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34827-0x101afa027650000 connected 2024-12-07T05:49:38,115 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:38,118 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:38,130 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:38,135 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed, hbase.cluster.distributed=false 2024-12-07T05:49:38,157 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T05:49:38,162 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34827 2024-12-07T05:49:38,163 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34827 2024-12-07T05:49:38,163 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34827 2024-12-07T05:49:38,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34827 2024-12-07T05:49:38,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34827 2024-12-07T05:49:38,246 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b2ce7304d2:0 server-side Connection retries=45 2024-12-07T05:49:38,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:38,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:38,248 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T05:49:38,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:38,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T05:49:38,251 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T05:49:38,252 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T05:49:38,253 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45785 2024-12-07T05:49:38,255 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45785 connecting to ZooKeeper ensemble=127.0.0.1:52563 2024-12-07T05:49:38,255 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:38,259 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:38,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:457850x0, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T05:49:38,281 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:38,281 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45785-0x101afa027650001 connected 2024-12-07T05:49:38,285 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T05:49:38,294 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T05:49:38,297 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T05:49:38,304 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T05:49:38,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45785 2024-12-07T05:49:38,306 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45785 2024-12-07T05:49:38,309 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45785 2024-12-07T05:49:38,310 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45785 2024-12-07T05:49:38,310 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45785 2024-12-07T05:49:38,324 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b2ce7304d2:0 server-side Connection retries=45 2024-12-07T05:49:38,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:38,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:38,325 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T05:49:38,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:38,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T05:49:38,325 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T05:49:38,325 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T05:49:38,326 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37395 2024-12-07T05:49:38,327 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37395 connecting to ZooKeeper ensemble=127.0.0.1:52563 2024-12-07T05:49:38,328 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:38,331 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:38,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:373950x0, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T05:49:38,365 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37395-0x101afa027650002 connected 2024-12-07T05:49:38,365 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:38,365 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T05:49:38,366 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T05:49:38,368 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T05:49:38,370 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T05:49:38,371 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37395 2024-12-07T05:49:38,371 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37395 2024-12-07T05:49:38,372 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37395 2024-12-07T05:49:38,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37395 2024-12-07T05:49:38,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37395 2024-12-07T05:49:38,393 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b2ce7304d2:0 server-side Connection retries=45 2024-12-07T05:49:38,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:38,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:38,393 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T05:49:38,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:38,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T05:49:38,394 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T05:49:38,394 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T05:49:38,395 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43675 2024-12-07T05:49:38,396 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43675 connecting to ZooKeeper ensemble=127.0.0.1:52563 2024-12-07T05:49:38,398 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:38,401 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:38,413 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:436750x0, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T05:49:38,414 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43675-0x101afa027650003 connected 2024-12-07T05:49:38,414 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:38,414 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T05:49:38,415 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T05:49:38,416 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T05:49:38,419 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T05:49:38,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43675 2024-12-07T05:49:38,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43675 2024-12-07T05:49:38,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43675 2024-12-07T05:49:38,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43675 2024-12-07T05:49:38,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43675 2024-12-07T05:49:38,439 DEBUG [M:0;16b2ce7304d2:34827 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;16b2ce7304d2:34827 2024-12-07T05:49:38,440 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/16b2ce7304d2,34827,1733550577570 2024-12-07T05:49:38,455 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:38,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:38,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:38,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:38,457 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/16b2ce7304d2,34827,1733550577570 2024-12-07T05:49:38,480 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T05:49:38,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T05:49:38,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T05:49:38,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:38,480 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:38,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:38,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:38,481 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T05:49:38,482 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/16b2ce7304d2,34827,1733550577570 from backup master directory 2024-12-07T05:49:38,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:38,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:38,488 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:38,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/16b2ce7304d2,34827,1733550577570 2024-12-07T05:49:38,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:38,489 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T05:49:38,489 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=16b2ce7304d2,34827,1733550577570 2024-12-07T05:49:38,491 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-07T05:49:38,492 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-07T05:49:38,545 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/hbase.id] with ID: 80984059-f814-42d6-8872-88fd89f88571 2024-12-07T05:49:38,546 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/.tmp/hbase.id 2024-12-07T05:49:38,553 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:38,553 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:38,559 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1395986970_22 at /127.0.0.1:54322 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:44585:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54322 dst: /127.0.0.1:44585 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T05:49:38,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-07T05:49:38,565 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T05:49:38,565 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/.tmp/hbase.id]:[hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/hbase.id] 2024-12-07T05:49:38,611 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:38,617 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T05:49:38,637 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-12-07T05:49:38,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:38,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:38,663 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:38,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:38,673 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:38,673 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:38,676 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1395986970_22 at /127.0.0.1:54358 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:44585:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54358 dst: /127.0.0.1:44585 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T05:49:38,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-07T05:49:38,684 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T05:49:38,697 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T05:49:38,698 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T05:49:38,703 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T05:49:38,729 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:38,729 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:38,732 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1395986970_22 at /127.0.0.1:52782 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:41373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52782 dst: /127.0.0.1:41373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T05:49:38,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-07T05:49:38,737 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T05:49:38,753 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store 2024-12-07T05:49:38,767 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:38,768 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:38,770 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1395986970_22 at /127.0.0.1:52806 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52806 dst: /127.0.0.1:41373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T05:49:38,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-07T05:49:38,775 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T05:49:38,779 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-07T05:49:38,781 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T05:49:38,782 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T05:49:38,783 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T05:49:38,783 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T05:49:38,784 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T05:49:38,784 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T05:49:38,784 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T05:49:38,785 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733550578782Disabling compacts and flushes for region at 1733550578782Disabling writes for close at 1733550578784 (+2 ms)Writing region close event to WAL at 1733550578784Closed at 1733550578784 2024-12-07T05:49:38,787 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/.initializing 2024-12-07T05:49:38,787 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/WALs/16b2ce7304d2,34827,1733550577570 2024-12-07T05:49:38,794 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T05:49:38,807 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b2ce7304d2%2C34827%2C1733550577570, suffix=, logDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/WALs/16b2ce7304d2,34827,1733550577570, archiveDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/oldWALs, maxLogs=10 2024-12-07T05:49:38,830 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/WALs/16b2ce7304d2,34827,1733550577570/16b2ce7304d2%2C34827%2C1733550577570.1733550578811, exclude list is [], retry=0 2024-12-07T05:49:38,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T05:49:38,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37163,DS-b92c1bad-8adc-4cd9-a414-17b9a9a798ed,DISK] 2024-12-07T05:49:38,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44585,DS-6f84a8da-97db-4dd8-838e-f14171bd83d4,DISK] 2024-12-07T05:49:38,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41373,DS-b522406b-6d23-43e3-bd9b-8ab3caee7af5,DISK] 2024-12-07T05:49:38,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-07T05:49:38,886 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/WALs/16b2ce7304d2,34827,1733550577570/16b2ce7304d2%2C34827%2C1733550577570.1733550578811 2024-12-07T05:49:38,887 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34967:34967),(127.0.0.1/127.0.0.1:40679:40679),(127.0.0.1/127.0.0.1:35653:35653)] 2024-12-07T05:49:38,888 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T05:49:38,888 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T05:49:38,891 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:38,891 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:38,924 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:38,944 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T05:49:38,947 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:38,949 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:38,949 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:38,952 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T05:49:38,952 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:38,953 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T05:49:38,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:38,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T05:49:38,956 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:38,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T05:49:38,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:38,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T05:49:38,960 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:38,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T05:49:38,961 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:38,964 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:38,965 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:38,969 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:38,970 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:38,973 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T05:49:38,977 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:38,983 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T05:49:38,984 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74152721, jitterRate=0.10496164858341217}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T05:49:38,990 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733550578902Initializing all the Stores at 1733550578904 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550578904Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733550578905 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733550578905Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733550578905Cleaning up temporary data from old regions at 1733550578970 (+65 ms)Region opened successfully at 1733550578989 (+19 ms) 2024-12-07T05:49:38,991 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T05:49:39,024 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@490dd5d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b2ce7304d2/172.17.0.2:0 2024-12-07T05:49:39,054 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T05:49:39,063 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T05:49:39,063 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T05:49:39,065 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T05:49:39,066 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-07T05:49:39,070 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-12-07T05:49:39,071 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T05:49:39,092 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T05:49:39,099 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T05:49:39,163 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T05:49:39,165 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T05:49:39,167 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T05:49:39,171 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T05:49:39,173 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T05:49:39,177 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T05:49:39,188 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T05:49:39,189 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T05:49:39,196 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T05:49:39,215 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T05:49:39,221 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T05:49:39,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:39,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:39,230 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:39,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:39,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:39,230 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:39,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:39,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:39,233 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=16b2ce7304d2,34827,1733550577570, sessionid=0x101afa027650000, setting cluster-up flag (Was=false) 2024-12-07T05:49:39,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:39,255 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:39,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-07T05:49:39,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:39,280 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T05:49:39,284 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=16b2ce7304d2,34827,1733550577570 2024-12-07T05:49:39,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:39,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:39,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:39,305 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:39,330 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T05:49:39,332 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=16b2ce7304d2,34827,1733550577570 2024-12-07T05:49:39,340 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T05:49:39,398 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T05:49:39,407 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T05:49:39,414 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-07T05:49:39,420 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 16b2ce7304d2,34827,1733550577570 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T05:49:39,424 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.HRegionServer(746): ClusterId : 80984059-f814-42d6-8872-88fd89f88571 2024-12-07T05:49:39,424 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(746): ClusterId : 80984059-f814-42d6-8872-88fd89f88571 2024-12-07T05:49:39,424 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.HRegionServer(746): ClusterId : 80984059-f814-42d6-8872-88fd89f88571 2024-12-07T05:49:39,427 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T05:49:39,427 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T05:49:39,427 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T05:49:39,428 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/16b2ce7304d2:0, corePoolSize=5, maxPoolSize=5 2024-12-07T05:49:39,428 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/16b2ce7304d2:0, corePoolSize=5, maxPoolSize=5 2024-12-07T05:49:39,428 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/16b2ce7304d2:0, corePoolSize=5, maxPoolSize=5 2024-12-07T05:49:39,429 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/16b2ce7304d2:0, corePoolSize=5, maxPoolSize=5 2024-12-07T05:49:39,429 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/16b2ce7304d2:0, corePoolSize=10, maxPoolSize=10 2024-12-07T05:49:39,429 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,429 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/16b2ce7304d2:0, corePoolSize=2, maxPoolSize=2 2024-12-07T05:49:39,429 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,433 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733550609433 2024-12-07T05:49:39,434 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T05:49:39,435 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] 
cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T05:49:39,435 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T05:49:39,436 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T05:49:39,440 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T05:49:39,440 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T05:49:39,441 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T05:49:39,441 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T05:49:39,441 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:39,441 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T05:49:39,441 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-07T05:49:39,447 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T05:49:39,448 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T05:49:39,449 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T05:49:39,450 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:39,450 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:39,452 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T05:49:39,452 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T05:49:39,453 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1395986970_22 at /127.0.0.1:52834 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:41373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52834 dst: /127.0.0.1:41373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T05:49:39,459 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/16b2ce7304d2:0:becomeActiveMaster-HFileCleaner.large.0-1733550579454,5,FailOnTimeoutGroup] 2024-12-07T05:49:39,460 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/16b2ce7304d2:0:becomeActiveMaster-HFileCleaner.small.0-1733550579459,5,FailOnTimeoutGroup] 2024-12-07T05:49:39,460 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,460 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T05:49:39,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-07T05:49:39,461 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,461 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,462 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T05:49:39,464 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T05:49:39,464 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed 2024-12-07T05:49:39,464 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T05:49:39,464 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T05:49:39,464 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T05:49:39,465 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T05:49:39,465 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T05:49:39,465 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T05:49:39,473 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T05:49:39,473 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T05:49:39,473 DEBUG [RS:1;16b2ce7304d2:37395 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a2d6e3a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b2ce7304d2/172.17.0.2:0 2024-12-07T05:49:39,474 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T05:49:39,474 DEBUG [RS:0;16b2ce7304d2:45785 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18a32f31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b2ce7304d2/172.17.0.2:0 2024-12-07T05:49:39,474 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:39,473 DEBUG [RS:2;16b2ce7304d2:43675 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79aa0d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b2ce7304d2/172.17.0.2:0 2024-12-07T05:49:39,474 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-07T05:49:39,488 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1395986970_22 at /127.0.0.1:52852 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:41373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52852 dst: /127.0.0.1:41373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T05:49:39,496 DEBUG [RS:0;16b2ce7304d2:45785 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;16b2ce7304d2:45785 2024-12-07T05:49:39,496 DEBUG [RS:1;16b2ce7304d2:37395 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;16b2ce7304d2:37395 2024-12-07T05:49:39,499 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T05:49:39,499 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T05:49:39,499 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T05:49:39,499 DEBUG [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T05:49:39,499 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T05:49:39,500 DEBUG [RS:1;16b2ce7304d2:37395 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T05:49:39,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-07T05:49:39,502 DEBUG [RS:2;16b2ce7304d2:43675 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;16b2ce7304d2:43675 2024-12-07T05:49:39,502 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T05:49:39,502 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T05:49:39,502 DEBUG [RS:2;16b2ce7304d2:43675 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-07T05:49:39,502 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T05:49:39,503 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b2ce7304d2,34827,1733550577570 with port=37395, startcode=1733550578323 2024-12-07T05:49:39,503 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b2ce7304d2,34827,1733550577570 with port=45785, startcode=1733550578218 2024-12-07T05:49:39,503 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b2ce7304d2,34827,1733550577570 with port=43675, startcode=1733550578392 2024-12-07T05:49:39,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T05:49:39,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T05:49:39,514 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T05:49:39,515 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:39,516 DEBUG [RS:1;16b2ce7304d2:37395 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T05:49:39,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:39,516 DEBUG [RS:2;16b2ce7304d2:43675 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T05:49:39,516 DEBUG [RS:0;16b2ce7304d2:45785 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T05:49:39,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T05:49:39,520 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T05:49:39,520 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:39,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:39,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T05:49:39,525 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T05:49:39,525 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:39,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:39,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T05:49:39,530 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T05:49:39,530 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:39,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:39,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T05:49:39,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740 2024-12-07T05:49:39,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740 2024-12-07T05:49:39,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T05:49:39,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T05:49:39,544 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T05:49:39,547 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T05:49:39,556 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34285, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T05:49:39,556 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41375, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T05:49:39,556 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52437, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T05:49:39,566 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34827 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b2ce7304d2,43675,1733550578392 2024-12-07T05:49:39,569 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T05:49:39,569 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34827 {}] master.ServerManager(517): Registering regionserver=16b2ce7304d2,43675,1733550578392 2024-12-07T05:49:39,570 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72567337, jitterRate=0.0813375860452652}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T05:49:39,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem 
at 1733550579504Initializing all the Stores at 1733550579506 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550579506Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550579510 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733550579511 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550579511Cleaning up temporary data from old regions at 1733550579539 (+28 ms)Region opened successfully at 1733550579573 (+34 ms) 2024-12-07T05:49:39,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T05:49:39,574 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T05:49:39,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T05:49:39,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T05:49:39,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T05:49:39,576 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T05:49:39,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733550579574Disabling compacts and flushes for region at 1733550579574Disabling writes for close at 1733550579574Writing region close event to WAL at 1733550579575 (+1 ms)Closed at 1733550579576 (+1 ms) 2024-12-07T05:49:39,580 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T05:49:39,580 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T05:49:39,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-07T05:49:39,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-07T05:49:39,583 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34827 {}] 
master.ServerManager(363): Checking decommissioned status of RegionServer 16b2ce7304d2,37395,1733550578323 2024-12-07T05:49:39,583 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34827 {}] master.ServerManager(517): Registering regionserver=16b2ce7304d2,37395,1733550578323 2024-12-07T05:49:39,586 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34827 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b2ce7304d2,45785,1733550578218 2024-12-07T05:49:39,587 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34827 {}] master.ServerManager(517): Registering regionserver=16b2ce7304d2,45785,1733550578218 2024-12-07T05:49:39,587 DEBUG [RS:2;16b2ce7304d2:43675 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed 2024-12-07T05:49:39,587 DEBUG [RS:2;16b2ce7304d2:43675 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32879 2024-12-07T05:49:39,587 DEBUG [RS:2;16b2ce7304d2:43675 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T05:49:39,587 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T05:49:39,588 DEBUG [RS:1;16b2ce7304d2:37395 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed 2024-12-07T05:49:39,588 DEBUG [RS:1;16b2ce7304d2:37395 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32879 2024-12-07T05:49:39,588 DEBUG [RS:1;16b2ce7304d2:37395 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T05:49:39,594 DEBUG [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed 2024-12-07T05:49:39,594 DEBUG [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32879 2024-12-07T05:49:39,594 DEBUG [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T05:49:39,597 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T05:49:39,599 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T05:49:39,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T05:49:39,655 DEBUG [RS:1;16b2ce7304d2:37395 {}] zookeeper.ZKUtil(111): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b2ce7304d2,37395,1733550578323 2024-12-07T05:49:39,655 DEBUG 
[RS:2;16b2ce7304d2:43675 {}] zookeeper.ZKUtil(111): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b2ce7304d2,43675,1733550578392 2024-12-07T05:49:39,655 WARN [RS:2;16b2ce7304d2:43675 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T05:49:39,655 WARN [RS:1;16b2ce7304d2:37395 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T05:49:39,655 INFO [RS:2;16b2ce7304d2:43675 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T05:49:39,655 INFO [RS:1;16b2ce7304d2:37395 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T05:49:39,655 DEBUG [RS:0;16b2ce7304d2:45785 {}] zookeeper.ZKUtil(111): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b2ce7304d2,45785,1733550578218 2024-12-07T05:49:39,655 WARN [RS:0;16b2ce7304d2:45785 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T05:49:39,655 DEBUG [RS:2;16b2ce7304d2:43675 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,43675,1733550578392 2024-12-07T05:49:39,655 DEBUG [RS:1;16b2ce7304d2:37395 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,37395,1733550578323 2024-12-07T05:49:39,656 INFO [RS:0;16b2ce7304d2:45785 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T05:49:39,656 DEBUG [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,45785,1733550578218 2024-12-07T05:49:39,657 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b2ce7304d2,43675,1733550578392] 2024-12-07T05:49:39,657 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b2ce7304d2,37395,1733550578323] 2024-12-07T05:49:39,657 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b2ce7304d2,45785,1733550578218] 2024-12-07T05:49:39,682 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T05:49:39,682 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T05:49:39,682 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T05:49:39,694 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T05:49:39,694 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T05:49:39,694 INFO 
[RS:2;16b2ce7304d2:43675 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T05:49:39,700 INFO [RS:1;16b2ce7304d2:37395 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T05:49:39,700 INFO [RS:2;16b2ce7304d2:43675 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T05:49:39,700 INFO [RS:0;16b2ce7304d2:45785 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T05:49:39,700 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,700 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,700 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,703 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T05:49:39,703 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T05:49:39,703 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T05:49:39,709 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T05:49:39,709 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T05:49:39,709 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T05:49:39,710 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,710 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,710 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-07T05:49:39,711 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b2ce7304d2:0, corePoolSize=2, maxPoolSize=2 2024-12-07T05:49:39,711 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 
2024-12-07T05:49:39,711 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b2ce7304d2:0, corePoolSize=2, maxPoolSize=2 2024-12-07T05:49:39,711 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b2ce7304d2:0, corePoolSize=2, maxPoolSize=2 2024-12-07T05:49:39,711 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,711 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,712 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,712 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,712 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,712 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,712 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b2ce7304d2:0, corePoolSize=3, maxPoolSize=3 2024-12-07T05:49:39,712 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,712 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,712 DEBUG [RS:2;16b2ce7304d2:43675 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0, corePoolSize=3, maxPoolSize=3 2024-12-07T05:49:39,712 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,712 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,712 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,712 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting 
executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,712 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,712 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:39,712 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b2ce7304d2:0, corePoolSize=3, maxPoolSize=3 2024-12-07T05:49:39,712 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b2ce7304d2:0, corePoolSize=3, maxPoolSize=3 2024-12-07T05:49:39,712 DEBUG [RS:1;16b2ce7304d2:37395 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0, corePoolSize=3, maxPoolSize=3 2024-12-07T05:49:39,712 DEBUG [RS:0;16b2ce7304d2:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0, corePoolSize=3, maxPoolSize=3 2024-12-07T05:49:39,713 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,713 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,713 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,713 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,714 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,714 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,43675,1733550578392-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T05:49:39,717 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,717 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,718 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,718 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,718 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,718 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-07T05:49:39,718 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,718 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,718 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,718 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,718 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,45785,1733550578218-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T05:49:39,718 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,37395,1733550578323-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T05:49:39,734 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T05:49:39,735 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,37395,1733550578323-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,735 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T05:49:39,736 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,43675,1733550578392-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,736 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,736 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.Replication(171): 16b2ce7304d2,37395,1733550578323 started 2024-12-07T05:49:39,736 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,736 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.Replication(171): 16b2ce7304d2,43675,1733550578392 started 2024-12-07T05:49:39,737 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T05:49:39,737 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,45785,1733550578218-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,737 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,737 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.Replication(171): 16b2ce7304d2,45785,1733550578218 started 2024-12-07T05:49:39,750 WARN [16b2ce7304d2:34827 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T05:49:39,752 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,752 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T05:49:39,752 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.HRegionServer(1482): Serving as 16b2ce7304d2,37395,1733550578323, RpcServer on 16b2ce7304d2/172.17.0.2:37395, sessionid=0x101afa027650002 2024-12-07T05:49:39,752 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(1482): Serving as 16b2ce7304d2,45785,1733550578218, RpcServer on 16b2ce7304d2/172.17.0.2:45785, sessionid=0x101afa027650001 2024-12-07T05:49:39,753 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T05:49:39,753 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T05:49:39,753 DEBUG [RS:0;16b2ce7304d2:45785 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b2ce7304d2,45785,1733550578218 2024-12-07T05:49:39,753 DEBUG [RS:1;16b2ce7304d2:37395 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b2ce7304d2,37395,1733550578323 2024-12-07T05:49:39,753 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b2ce7304d2,37395,1733550578323' 2024-12-07T05:49:39,753 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b2ce7304d2,45785,1733550578218' 2024-12-07T05:49:39,753 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T05:49:39,753 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T05:49:39,754 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T05:49:39,754 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T05:49:39,755 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T05:49:39,755 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T05:49:39,755 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T05:49:39,755 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T05:49:39,755 DEBUG [RS:1;16b2ce7304d2:37395 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b2ce7304d2,37395,1733550578323 2024-12-07T05:49:39,755 DEBUG [RS:0;16b2ce7304d2:45785 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b2ce7304d2,45785,1733550578218 2024-12-07T05:49:39,755 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b2ce7304d2,37395,1733550578323' 2024-12-07T05:49:39,755 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b2ce7304d2,45785,1733550578218' 2024-12-07T05:49:39,755 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T05:49:39,755 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T05:49:39,756 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T05:49:39,756 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T05:49:39,756 DEBUG [RS:1;16b2ce7304d2:37395 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T05:49:39,756 DEBUG [RS:0;16b2ce7304d2:45785 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T05:49:39,756 INFO [RS:0;16b2ce7304d2:45785 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T05:49:39,756 INFO [RS:1;16b2ce7304d2:37395 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T05:49:39,756 INFO [RS:0;16b2ce7304d2:45785 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T05:49:39,756 INFO [RS:1;16b2ce7304d2:37395 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T05:49:39,757 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:39,757 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.HRegionServer(1482): Serving as 16b2ce7304d2,43675,1733550578392, RpcServer on 16b2ce7304d2/172.17.0.2:43675, sessionid=0x101afa027650003 2024-12-07T05:49:39,757 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T05:49:39,757 DEBUG [RS:2;16b2ce7304d2:43675 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b2ce7304d2,43675,1733550578392 2024-12-07T05:49:39,757 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b2ce7304d2,43675,1733550578392' 2024-12-07T05:49:39,757 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T05:49:39,758 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T05:49:39,759 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T05:49:39,759 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T05:49:39,759 DEBUG [RS:2;16b2ce7304d2:43675 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b2ce7304d2,43675,1733550578392 2024-12-07T05:49:39,759 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b2ce7304d2,43675,1733550578392' 2024-12-07T05:49:39,759 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T05:49:39,760 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T05:49:39,760 DEBUG [RS:2;16b2ce7304d2:43675 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 
2024-12-07T05:49:39,760 INFO [RS:2;16b2ce7304d2:43675 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T05:49:39,760 INFO [RS:2;16b2ce7304d2:43675 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T05:49:39,862 INFO [RS:1;16b2ce7304d2:37395 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T05:49:39,862 INFO [RS:2;16b2ce7304d2:43675 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T05:49:39,862 INFO [RS:0;16b2ce7304d2:45785 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T05:49:39,866 INFO [RS:1;16b2ce7304d2:37395 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b2ce7304d2%2C37395%2C1733550578323, suffix=, logDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,37395,1733550578323, archiveDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/oldWALs, maxLogs=32 2024-12-07T05:49:39,866 INFO [RS:0;16b2ce7304d2:45785 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b2ce7304d2%2C45785%2C1733550578218, suffix=, logDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,45785,1733550578218, archiveDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/oldWALs, maxLogs=32 2024-12-07T05:49:39,866 INFO [RS:2;16b2ce7304d2:43675 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b2ce7304d2%2C43675%2C1733550578392, suffix=, logDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,43675,1733550578392, archiveDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/oldWALs, maxLogs=32 2024-12-07T05:49:39,883 DEBUG [RS:0;16b2ce7304d2:45785 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,45785,1733550578218/16b2ce7304d2%2C45785%2C1733550578218.1733550579870, exclude list is [], retry=0 2024-12-07T05:49:39,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41373,DS-b522406b-6d23-43e3-bd9b-8ab3caee7af5,DISK] 2024-12-07T05:49:39,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37163,DS-b92c1bad-8adc-4cd9-a414-17b9a9a798ed,DISK] 2024-12-07T05:49:39,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44585,DS-6f84a8da-97db-4dd8-838e-f14171bd83d4,DISK] 2024-12-07T05:49:39,888 DEBUG [RS:2;16b2ce7304d2:43675 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,43675,1733550578392/16b2ce7304d2%2C43675%2C1733550578392.1733550579871, exclude list is [], retry=0 2024-12-07T05:49:39,888 DEBUG [RS:1;16b2ce7304d2:37395 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,37395,1733550578323/16b2ce7304d2%2C37395%2C1733550578323.1733550579871, exclude list is [], retry=0 2024-12-07T05:49:39,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41373,DS-b522406b-6d23-43e3-bd9b-8ab3caee7af5,DISK] 2024-12-07T05:49:39,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37163,DS-b92c1bad-8adc-4cd9-a414-17b9a9a798ed,DISK] 2024-12-07T05:49:39,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44585,DS-6f84a8da-97db-4dd8-838e-f14171bd83d4,DISK] 2024-12-07T05:49:39,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37163,DS-b92c1bad-8adc-4cd9-a414-17b9a9a798ed,DISK] 2024-12-07T05:49:39,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44585,DS-6f84a8da-97db-4dd8-838e-f14171bd83d4,DISK] 2024-12-07T05:49:39,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41373,DS-b522406b-6d23-43e3-bd9b-8ab3caee7af5,DISK] 2024-12-07T05:49:39,918 INFO [RS:0;16b2ce7304d2:45785 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,45785,1733550578218/16b2ce7304d2%2C45785%2C1733550578218.1733550579870 2024-12-07T05:49:39,920 DEBUG [RS:0;16b2ce7304d2:45785 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34967:34967),(127.0.0.1/127.0.0.1:40679:40679),(127.0.0.1/127.0.0.1:35653:35653)] 2024-12-07T05:49:39,920 INFO [RS:2;16b2ce7304d2:43675 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,43675,1733550578392/16b2ce7304d2%2C43675%2C1733550578392.1733550579871 2024-12-07T05:49:39,920 INFO [RS:1;16b2ce7304d2:37395 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,37395,1733550578323/16b2ce7304d2%2C37395%2C1733550578323.1733550579871 2024-12-07T05:49:39,921 DEBUG [RS:2;16b2ce7304d2:43675 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:34967:34967),(127.0.0.1/127.0.0.1:35653:35653),(127.0.0.1/127.0.0.1:40679:40679)] 2024-12-07T05:49:39,921 DEBUG [RS:1;16b2ce7304d2:37395 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40679:40679),(127.0.0.1/127.0.0.1:34967:34967),(127.0.0.1/127.0.0.1:35653:35653)] 2024-12-07T05:49:40,005 DEBUG [16b2ce7304d2:34827 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-07T05:49:40,017 DEBUG [16b2ce7304d2:34827 {}] balancer.BalancerClusterState(204): Hosts are {16b2ce7304d2=0} racks are {/default-rack=0} 2024-12-07T05:49:40,023 DEBUG [16b2ce7304d2:34827 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T05:49:40,023 DEBUG [16b2ce7304d2:34827 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T05:49:40,023 DEBUG [16b2ce7304d2:34827 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T05:49:40,023 DEBUG [16b2ce7304d2:34827 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T05:49:40,023 DEBUG [16b2ce7304d2:34827 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T05:49:40,023 DEBUG [16b2ce7304d2:34827 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T05:49:40,023 INFO [16b2ce7304d2:34827 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T05:49:40,023 INFO [16b2ce7304d2:34827 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T05:49:40,023 INFO [16b2ce7304d2:34827 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T05:49:40,023 DEBUG [16b2ce7304d2:34827 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T05:49:40,029 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=16b2ce7304d2,45785,1733550578218 2024-12-07T05:49:40,035 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 16b2ce7304d2,45785,1733550578218, state=OPENING 2024-12-07T05:49:40,071 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T05:49:40,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:40,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:40,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:40,080 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:40,081 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:40,081 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:40,082 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:40,082 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:40,084 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T05:49:40,087 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=16b2ce7304d2,45785,1733550578218}] 2024-12-07T05:49:40,260 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T05:49:40,263 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59403, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T05:49:40,275 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T05:49:40,275 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T05:49:40,276 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-07T05:49:40,279 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b2ce7304d2%2C45785%2C1733550578218.meta, suffix=.meta, logDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,45785,1733550578218, archiveDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/oldWALs, maxLogs=32 2024-12-07T05:49:40,295 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,45785,1733550578218/16b2ce7304d2%2C45785%2C1733550578218.meta.1733550580281.meta, exclude list is [], retry=0 2024-12-07T05:49:40,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44585,DS-6f84a8da-97db-4dd8-838e-f14171bd83d4,DISK] 2024-12-07T05:49:40,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37163,DS-b92c1bad-8adc-4cd9-a414-17b9a9a798ed,DISK] 2024-12-07T05:49:40,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41373,DS-b522406b-6d23-43e3-bd9b-8ab3caee7af5,DISK] 2024-12-07T05:49:40,302 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs/16b2ce7304d2,45785,1733550578218/16b2ce7304d2%2C45785%2C1733550578218.meta.1733550580281.meta 2024-12-07T05:49:40,302 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35653:35653),(127.0.0.1/127.0.0.1:34967:34967),(127.0.0.1/127.0.0.1:40679:40679)] 2024-12-07T05:49:40,302 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T05:49:40,304 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T05:49:40,306 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T05:49:40,310 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
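For reference, the WAL configuration reported above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) comes from region server settings; the following is a minimal sketch, assuming the standard property names (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs) and that rollsize is derived as blocksize times the multiplier:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property names; values mirror the WAL configuration in the log above.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = 256 MB * 0.5 = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32
    long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
        * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
    System.out.println("Derived WAL rollsize (bytes): " + rollSize);
  }
}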
2024-12-07T05:49:40,313 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T05:49:40,313 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T05:49:40,313 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T05:49:40,314 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T05:49:40,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T05:49:40,318 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T05:49:40,318 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:40,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:40,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T05:49:40,320 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T05:49:40,320 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:40,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:40,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T05:49:40,323 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T05:49:40,323 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:40,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:40,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T05:49:40,325 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T05:49:40,325 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:40,326 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-07T05:49:40,326 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T05:49:40,328 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740 2024-12-07T05:49:40,330 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740 2024-12-07T05:49:40,332 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T05:49:40,332 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T05:49:40,333 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T05:49:40,336 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T05:49:40,337 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74717455, jitterRate=0.11337684094905853}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T05:49:40,337 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T05:49:40,339 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733550580314Writing region info on filesystem at 1733550580314Initializing all the Stores at 1733550580316 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550580316Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550580316Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733550580316Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550580316Cleaning up temporary data from old regions at 1733550580332 (+16 ms)Running coprocessor post-open hooks at 1733550580337 (+5 ms)Region opened successfully at 1733550580339 (+2 ms) 2024-12-07T05:49:40,345 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733550580252 2024-12-07T05:49:40,355 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T05:49:40,356 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T05:49:40,358 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=16b2ce7304d2,45785,1733550578218 2024-12-07T05:49:40,360 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 16b2ce7304d2,45785,1733550578218, state=OPEN 2024-12-07T05:49:40,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T05:49:40,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T05:49:40,388 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T05:49:40,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T05:49:40,388 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:40,388 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:40,388 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:40,388 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:40,389 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=16b2ce7304d2,45785,1733550578218 2024-12-07T05:49:40,398 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T05:49:40,398 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=16b2ce7304d2,45785,1733550578218 in 302 msec 2024-12-07T05:49:40,404 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T05:49:40,404 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 812 msec 2024-12-07T05:49:40,405 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T05:49:40,405 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T05:49:40,424 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T05:49:40,425 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=16b2ce7304d2,45785,1733550578218, seqNum=-1] 2024-12-07T05:49:40,445 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T05:49:40,447 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33139, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T05:49:40,485 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1170 sec 2024-12-07T05:49:40,485 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733550580485, completionTime=-1 2024-12-07T05:49:40,488 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-07T05:49:40,489 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-07T05:49:40,515 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-07T05:49:40,515 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733550640515 2024-12-07T05:49:40,515 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733550700515 2024-12-07T05:49:40,515 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 26 msec 2024-12-07T05:49:40,516 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-07T05:49:40,522 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,34827,1733550577570-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:40,522 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,34827,1733550577570-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:40,522 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,34827,1733550577570-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:40,523 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-16b2ce7304d2:34827, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:40,524 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:40,524 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:40,529 DEBUG [master/16b2ce7304d2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T05:49:40,547 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.058sec 2024-12-07T05:49:40,549 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T05:49:40,550 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T05:49:40,551 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T05:49:40,551 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-07T05:49:40,552 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T05:49:40,552 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,34827,1733550577570-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T05:49:40,553 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,34827,1733550577570-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T05:49:40,557 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T05:49:40,558 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T05:49:40,559 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,34827,1733550577570-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:40,633 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63d6d57f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T05:49:40,637 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-07T05:49:40,637 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-07T05:49:40,640 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 16b2ce7304d2,34827,-1 for getting cluster id 2024-12-07T05:49:40,643 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T05:49:40,649 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '80984059-f814-42d6-8872-88fd89f88571' 2024-12-07T05:49:40,651 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T05:49:40,652 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "80984059-f814-42d6-8872-88fd89f88571" 2024-12-07T05:49:40,652 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@630223d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T05:49:40,652 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [16b2ce7304d2,34827,-1] 2024-12-07T05:49:40,654 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T05:49:40,656 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:40,657 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47448, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-07T05:49:40,660 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@361b549b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T05:49:40,661 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T05:49:40,668 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=16b2ce7304d2,45785,1733550578218, seqNum=-1] 2024-12-07T05:49:40,668 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T05:49:40,671 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50570, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T05:49:40,691 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=16b2ce7304d2,34827,1733550577570 2024-12-07T05:49:40,695 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T05:49:40,699 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 16b2ce7304d2,34827,1733550577570 2024-12-07T05:49:40,701 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@475e4c1b 2024-12-07T05:49:40,701 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T05:49:40,704 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47454, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T05:49:40,709 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T05:49:40,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-07T05:49:40,719 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T05:49:40,721 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-07T05:49:40,721 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T05:49:40,724 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-07T05:49:40,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-07T05:49:40,733 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-07T05:49:40,733 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-07T05:49:40,736 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1395986970_22 at /127.0.0.1:54434 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:44585:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54434 dst: /127.0.0.1:44585
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T05:49:40,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_-9223372036854775680_1021 (size=392)
2024-12-07T05:49:40,741 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-07T05:49:40,744 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 79a2dc6db850561a0d4a48865ba2b14a, NAME => 'TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed
2024-12-07T05:49:40,750 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-07T05:49:40,751 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-07T05:49:40,756 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1395986970_22 at /127.0.0.1:51328 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:37163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51328 dst: /127.0.0.1:37163
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T05:49:40,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-07T05:49:40,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T05:49:41,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T05:49:41,163 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T05:49:41,163 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T05:49:41,164 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 79a2dc6db850561a0d4a48865ba2b14a, disabling compactions & flushes 2024-12-07T05:49:41,164 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 2024-12-07T05:49:41,164 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 2024-12-07T05:49:41,164 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. after waiting 0 ms 2024-12-07T05:49:41,164 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 2024-12-07T05:49:41,164 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 2024-12-07T05:49:41,164 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 79a2dc6db850561a0d4a48865ba2b14a: Waiting for close lock at 1733550581163Disabling compacts and flushes for region at 1733550581163Disabling writes for close at 1733550581164 (+1 ms)Writing region close event to WAL at 1733550581164Closed at 1733550581164 2024-12-07T05:49:41,168 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T05:49:41,174 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733550581168"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733550581168"}]},"ts":"1733550581168"} 2024-12-07T05:49:41,178 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
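The DFSStripedOutputStream warnings above are expected on this topology: the RS-3-2-1024k policy needs 3 data plus 2 parity blocks, i.e. at least five datanodes, while this mini cluster runs only three, so the parity blocks at index 3 and 4 cannot be placed. A minimal sketch of comparing the effective policy against the live datanode count on the WAL directory (assumes a Hadoop 3.x client; the namenode URI and WAL path are the ones reported in the log above), roughly what the 'hdfs ec -verifyClusterSetup' command quoted in the warning checks cluster-wide:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class EcPolicyCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Namenode and WAL directory as reported in the log; adjust for a real cluster.
    Path walDir = new Path(
        "hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/WALs");
    try (DistributedFileSystem dfs = (DistributedFileSystem) walDir.getFileSystem(conf)) {
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(walDir);
      int liveNodes = dfs.getDataNodeStats(DatanodeReportType.LIVE).length;
      if (policy == null) {
        System.out.println("No erasure coding policy set on " + walDir);
      } else {
        int required = policy.getNumDataUnits() + policy.getNumParityUnits();
        System.out.printf("policy=%s needs %d datanodes, cluster has %d live%n",
            policy.getName(), required, liveNodes);
      }
    }
  }
}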
2024-12-07T05:49:41,180 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T05:49:41,182 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733550581180"}]},"ts":"1733550581180"} 2024-12-07T05:49:41,186 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-07T05:49:41,186 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {16b2ce7304d2=0} racks are {/default-rack=0} 2024-12-07T05:49:41,188 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T05:49:41,188 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T05:49:41,188 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T05:49:41,188 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T05:49:41,188 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T05:49:41,188 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T05:49:41,188 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T05:49:41,188 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T05:49:41,188 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T05:49:41,188 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T05:49:41,190 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=79a2dc6db850561a0d4a48865ba2b14a, ASSIGN}] 2024-12-07T05:49:41,193 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=79a2dc6db850561a0d4a48865ba2b14a, ASSIGN 2024-12-07T05:49:41,194 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=79a2dc6db850561a0d4a48865ba2b14a, ASSIGN; state=OFFLINE, location=16b2ce7304d2,45785,1733550578218; forceNewPlan=false, retain=false 2024-12-07T05:49:41,347 INFO [16b2ce7304d2:34827 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-07T05:49:41,348 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=79a2dc6db850561a0d4a48865ba2b14a, regionState=OPENING, regionLocation=16b2ce7304d2,45785,1733550578218 2024-12-07T05:49:41,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=79a2dc6db850561a0d4a48865ba2b14a, ASSIGN because future has completed 2024-12-07T05:49:41,354 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 79a2dc6db850561a0d4a48865ba2b14a, server=16b2ce7304d2,45785,1733550578218}] 2024-12-07T05:49:41,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T05:49:41,518 INFO [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 2024-12-07T05:49:41,518 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 79a2dc6db850561a0d4a48865ba2b14a, NAME => 'TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a.', STARTKEY => '', ENDKEY => ''} 2024-12-07T05:49:41,519 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 79a2dc6db850561a0d4a48865ba2b14a 2024-12-07T05:49:41,519 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T05:49:41,519 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 79a2dc6db850561a0d4a48865ba2b14a 2024-12-07T05:49:41,519 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 79a2dc6db850561a0d4a48865ba2b14a 2024-12-07T05:49:41,522 INFO [StoreOpener-79a2dc6db850561a0d4a48865ba2b14a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 79a2dc6db850561a0d4a48865ba2b14a 2024-12-07T05:49:41,524 INFO [StoreOpener-79a2dc6db850561a0d4a48865ba2b14a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 79a2dc6db850561a0d4a48865ba2b14a columnFamilyName cf 2024-12-07T05:49:41,524 DEBUG [StoreOpener-79a2dc6db850561a0d4a48865ba2b14a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:41,525 INFO [StoreOpener-79a2dc6db850561a0d4a48865ba2b14a-1 {}] regionserver.HStore(327): Store=79a2dc6db850561a0d4a48865ba2b14a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T05:49:41,525 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 79a2dc6db850561a0d4a48865ba2b14a 2024-12-07T05:49:41,527 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/default/TestHBaseWalOnEC/79a2dc6db850561a0d4a48865ba2b14a 2024-12-07T05:49:41,527 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/default/TestHBaseWalOnEC/79a2dc6db850561a0d4a48865ba2b14a 2024-12-07T05:49:41,528 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 79a2dc6db850561a0d4a48865ba2b14a 2024-12-07T05:49:41,528 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 79a2dc6db850561a0d4a48865ba2b14a 2024-12-07T05:49:41,531 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 79a2dc6db850561a0d4a48865ba2b14a 2024-12-07T05:49:41,538 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/default/TestHBaseWalOnEC/79a2dc6db850561a0d4a48865ba2b14a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T05:49:41,539 INFO [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 79a2dc6db850561a0d4a48865ba2b14a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74626198, jitterRate=0.11201700568199158}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T05:49:41,539 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 79a2dc6db850561a0d4a48865ba2b14a 2024-12-07T05:49:41,540 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 79a2dc6db850561a0d4a48865ba2b14a: Running coprocessor pre-open hook at 1733550581519Writing region info on filesystem at 1733550581519Initializing all the Stores at 1733550581521 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733550581521Cleaning up temporary data from old regions at 1733550581528 (+7 ms)Running coprocessor post-open hooks at 1733550581539 (+11 ms)Region opened successfully at 1733550581540 (+1 ms) 2024-12-07T05:49:41,541 INFO [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a., pid=6, masterSystemTime=1733550581509 2024-12-07T05:49:41,544 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 2024-12-07T05:49:41,544 INFO [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 2024-12-07T05:49:41,546 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=79a2dc6db850561a0d4a48865ba2b14a, regionState=OPEN, openSeqNum=2, regionLocation=16b2ce7304d2,45785,1733550578218 2024-12-07T05:49:41,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 79a2dc6db850561a0d4a48865ba2b14a, server=16b2ce7304d2,45785,1733550578218 because future has completed 2024-12-07T05:49:41,554 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T05:49:41,554 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 79a2dc6db850561a0d4a48865ba2b14a, server=16b2ce7304d2,45785,1733550578218 in 196 msec 2024-12-07T05:49:41,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T05:49:41,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=79a2dc6db850561a0d4a48865ba2b14a, ASSIGN in 364 msec 2024-12-07T05:49:41,559 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T05:49:41,560 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733550581560"}]},"ts":"1733550581560"} 2024-12-07T05:49:41,564 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-07T05:49:41,566 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T05:49:41,569 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 855 msec 
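[Annotation] The completed pid=4 CreateTableProcedure (with its ASSIGN child pid=5 and OpenRegionProcedure pid=6) corresponds to a single-family table whose descriptor was logged at creation time: one family 'cf', VERSIONS => '1', no compression or block encoding. A hedged sketch of issuing an equivalent create through the HBase client API, not the test's actual code; the connection setup is assumed to point at this mini-cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumed to resolve the mini-cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestHBaseWalOnEC");
          admin.createTable(TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                  .build())
              .build());
          // On the master this shows up as the CreateTableProcedure chain logged above.
        }
      }
    }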
2024-12-07T05:49:41,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T05:49:41,867 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T05:49:41,868 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-07T05:49:41,871 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T05:49:41,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-07T05:49:41,880 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T05:49:41,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-07T05:49:41,889 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a., hostname=16b2ce7304d2,45785,1733550578218, seqNum=2] 2024-12-07T05:49:41,898 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-07T05:49:41,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-07T05:49:41,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T05:49:41,904 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-07T05:49:41,907 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T05:49:41,908 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T05:49:42,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T05:49:42,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45785 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-07T05:49:42,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 
2024-12-07T05:49:42,083 INFO [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 79a2dc6db850561a0d4a48865ba2b14a 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-07T05:49:42,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/default/TestHBaseWalOnEC/79a2dc6db850561a0d4a48865ba2b14a/.tmp/cf/0aa49fc0165e4018a56a54203b3293de is 36, key is row/cf:cq/1733550581891/Put/seqid=0 2024-12-07T05:49:42,144 WARN [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:42,144 WARN [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:42,147 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1201349948_22 at /127.0.0.1:55350 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55350 dst: /127.0.0.1:37163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
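[Annotation] The flush above was requested by the client immediately after a single cell was written; the HFileWriterImpl line shows the key row/cf:cq, and the store-file write into the EC directory triggers the same parity-block warnings as before. A minimal, hedged sketch of the client-side put followed by a table flush; the table, row, family, and qualifier names come from the log, the value and connection setup are assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumed to resolve the mini-cluster
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          // Single cell matching the key logged for the flushed HFile: row/cf:cq
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // A table flush like this is what the master runs as the
          // FlushTableProcedure (pid=7) / FlushRegionProcedure (pid=8) pair seen above.
          admin.flush(name);
        }
      }
    }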
2024-12-07T05:49:42,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-07T05:49:42,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T05:49:42,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T05:49:42,554 WARN [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T05:49:42,554 INFO [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/default/TestHBaseWalOnEC/79a2dc6db850561a0d4a48865ba2b14a/.tmp/cf/0aa49fc0165e4018a56a54203b3293de 2024-12-07T05:49:42,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-07T05:49:42,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-07T05:49:42,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-07T05:49:42,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-07T05:49:42,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-07T05:49:42,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-07T05:49:42,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-07T05:49:42,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-07T05:49:42,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-07T05:49:42,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-07T05:49:42,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/default/TestHBaseWalOnEC/79a2dc6db850561a0d4a48865ba2b14a/.tmp/cf/0aa49fc0165e4018a56a54203b3293de as hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/default/TestHBaseWalOnEC/79a2dc6db850561a0d4a48865ba2b14a/cf/0aa49fc0165e4018a56a54203b3293de 
2024-12-07T05:49:42,607 INFO [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/default/TestHBaseWalOnEC/79a2dc6db850561a0d4a48865ba2b14a/cf/0aa49fc0165e4018a56a54203b3293de, entries=1, sequenceid=5, filesize=4.7 K 2024-12-07T05:49:42,613 INFO [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 79a2dc6db850561a0d4a48865ba2b14a in 530ms, sequenceid=5, compaction requested=false 2024-12-07T05:49:42,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-07T05:49:42,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 79a2dc6db850561a0d4a48865ba2b14a: 2024-12-07T05:49:42,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 2024-12-07T05:49:42,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-07T05:49:42,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-07T05:49:42,624 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-07T05:49:42,624 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 713 msec 2024-12-07T05:49:42,628 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 727 msec 2024-12-07T05:49:43,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T05:49:43,048 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T05:49:43,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T05:49:43,064 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T05:49:43,064 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T05:49:43,068 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
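[Annotation] The DEBUG call stack above is informational only: it records where AsyncConnectionImpl.close() was invoked, here from TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) via HBaseTestingUtil.shutdownMiniCluster. A hedged sketch of what such a tear-down hook typically looks like; only the shutdownMiniCluster call is confirmed by the stack trace, the field name UTIL and the rest of the class are assumptions:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class TearDownSketch {
      // Shared mini-cluster utility; the real test presumably holds something like this.
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Stops the region servers, the master, ZooKeeper and the mini-DFS, which
        // produces the "Shutting down minicluster" shutdown sequence in this log.
        UTIL.shutdownMiniCluster();
      }
    }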
2024-12-07T05:49:43,069 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:43,069 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T05:49:43,069 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T05:49:43,069 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=887671035, stopped=false 2024-12-07T05:49:43,069 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=16b2ce7304d2,34827,1733550577570 2024-12-07T05:49:43,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:43,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:43,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:43,138 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:43,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:43,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:43,138 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:43,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:43,139 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T05:49:43,140 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:43,140 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:43,140 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:43,140 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by 
Time-limited test. 2024-12-07T05:49:43,140 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:43,141 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T05:49:43,141 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:43,142 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b2ce7304d2,45785,1733550578218' ***** 2024-12-07T05:49:43,142 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T05:49:43,142 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b2ce7304d2,37395,1733550578323' ***** 2024-12-07T05:49:43,142 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T05:49:43,143 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b2ce7304d2,43675,1733550578392' ***** 2024-12-07T05:49:43,143 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T05:49:43,143 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T05:49:43,144 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T05:49:43,144 INFO [RS:1;16b2ce7304d2:37395 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T05:49:43,144 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T05:49:43,144 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T05:49:43,144 INFO [RS:0;16b2ce7304d2:45785 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T05:49:43,144 INFO [RS:1;16b2ce7304d2:37395 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T05:49:43,144 INFO [RS:0;16b2ce7304d2:45785 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T05:49:43,144 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.HRegionServer(959): stopping server 16b2ce7304d2,37395,1733550578323 2024-12-07T05:49:43,144 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T05:49:43,145 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T05:49:43,145 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(3091): Received CLOSE for 79a2dc6db850561a0d4a48865ba2b14a 2024-12-07T05:49:43,145 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T05:49:43,145 INFO [RS:1;16b2ce7304d2:37395 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;16b2ce7304d2:37395. 2024-12-07T05:49:43,145 INFO [RS:2;16b2ce7304d2:43675 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-07T05:49:43,145 DEBUG [RS:1;16b2ce7304d2:37395 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T05:49:43,145 INFO [RS:2;16b2ce7304d2:43675 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T05:49:43,145 DEBUG [RS:1;16b2ce7304d2:37395 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:43,145 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.HRegionServer(959): stopping server 16b2ce7304d2,43675,1733550578392 2024-12-07T05:49:43,145 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T05:49:43,145 INFO [RS:2;16b2ce7304d2:43675 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;16b2ce7304d2:43675. 2024-12-07T05:49:43,145 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.HRegionServer(976): stopping server 16b2ce7304d2,37395,1733550578323; all regions closed. 
2024-12-07T05:49:43,145 DEBUG [RS:2;16b2ce7304d2:43675 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T05:49:43,145 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(959): stopping server 16b2ce7304d2,45785,1733550578218 2024-12-07T05:49:43,145 DEBUG [RS:2;16b2ce7304d2:43675 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:43,145 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T05:49:43,145 INFO [RS:0;16b2ce7304d2:45785 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;16b2ce7304d2:45785. 2024-12-07T05:49:43,145 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.HRegionServer(976): stopping server 16b2ce7304d2,43675,1733550578392; all regions closed. 
2024-12-07T05:49:43,146 DEBUG [RS:0;16b2ce7304d2:45785 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T05:49:43,146 DEBUG [RS:0;16b2ce7304d2:45785 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:43,146 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T05:49:43,146 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 79a2dc6db850561a0d4a48865ba2b14a, disabling compactions & flushes 2024-12-07T05:49:43,146 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T05:49:43,146 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T05:49:43,146 INFO [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 2024-12-07T05:49:43,146 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 2024-12-07T05:49:43,146 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T05:49:43,146 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. after waiting 0 ms 2024-12-07T05:49:43,146 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 
2024-12-07T05:49:43,147 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-07T05:49:43,147 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T05:49:43,147 DEBUG [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(1325): Online Regions={79a2dc6db850561a0d4a48865ba2b14a=TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a., 1588230740=hbase:meta,,1.1588230740} 2024-12-07T05:49:43,147 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T05:49:43,147 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T05:49:43,148 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T05:49:43,148 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T05:49:43,148 DEBUG [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 79a2dc6db850561a0d4a48865ba2b14a 2024-12-07T05:49:43,148 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-07T05:49:43,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_1073741827_1017 (size=93) 2024-12-07T05:49:43,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741828_1018 (size=93) 2024-12-07T05:49:43,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741827_1017 (size=93) 2024-12-07T05:49:43,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741828_1018 (size=93) 2024-12-07T05:49:43,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_1073741828_1018 (size=93) 2024-12-07T05:49:43,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741827_1017 (size=93) 2024-12-07T05:49:43,160 DEBUG [RS:2;16b2ce7304d2:43675 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/oldWALs 2024-12-07T05:49:43,160 DEBUG [RS:1;16b2ce7304d2:37395 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/oldWALs 2024-12-07T05:49:43,160 INFO [RS:2;16b2ce7304d2:43675 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 16b2ce7304d2%2C43675%2C1733550578392:(num 1733550579871) 2024-12-07T05:49:43,160 INFO [RS:1;16b2ce7304d2:37395 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 16b2ce7304d2%2C37395%2C1733550578323:(num 1733550579871) 2024-12-07T05:49:43,160 DEBUG [RS:2;16b2ce7304d2:43675 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:43,160 DEBUG [RS:1;16b2ce7304d2:37395 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:43,160 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T05:49:43,160 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T05:49:43,160 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T05:49:43,160 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T05:49:43,161 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.ChoreService(370): Chore service for: regionserver/16b2ce7304d2:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T05:49:43,161 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.ChoreService(370): Chore service for: regionserver/16b2ce7304d2:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T05:49:43,161 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T05:49:43,161 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T05:49:43,161 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T05:49:43,161 INFO [regionserver/16b2ce7304d2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T05:49:43,161 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T05:49:43,161 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T05:49:43,161 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T05:49:43,161 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T05:49:43,161 INFO [regionserver/16b2ce7304d2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T05:49:43,161 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T05:49:43,161 INFO [RS:2;16b2ce7304d2:43675 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43675 2024-12-07T05:49:43,161 INFO [RS:1;16b2ce7304d2:37395 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37395 2024-12-07T05:49:43,169 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/default/TestHBaseWalOnEC/79a2dc6db850561a0d4a48865ba2b14a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T05:49:43,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b2ce7304d2,37395,1733550578323 2024-12-07T05:49:43,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T05:49:43,170 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T05:49:43,170 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b2ce7304d2,43675,1733550578392 2024-12-07T05:49:43,171 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T05:49:43,171 INFO [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 2024-12-07T05:49:43,171 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 79a2dc6db850561a0d4a48865ba2b14a: Waiting for close lock at 1733550583145Running coprocessor pre-close hooks at 1733550583146 (+1 ms)Disabling compacts and flushes for region at 1733550583146Disabling writes for close at 1733550583146Writing region close event to WAL at 1733550583148 (+2 ms)Running coprocessor post-close hooks at 1733550583170 (+22 ms)Closed at 1733550583171 (+1 ms) 2024-12-07T05:49:43,171 ERROR [pool-77-thread-1-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007fa9c08f4f20@64d5f0f1 rejected from java.util.concurrent.ThreadPoolExecutor@21d7720f[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-07T05:49:43,172 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a. 2024-12-07T05:49:43,179 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b2ce7304d2,43675,1733550578392] 2024-12-07T05:49:43,184 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/.tmp/info/49a8e13b523e4afd8d495a2c91c004fb is 153, key is TestHBaseWalOnEC,,1733550580705.79a2dc6db850561a0d4a48865ba2b14a./info:regioninfo/1733550581545/Put/seqid=0 2024-12-07T05:49:43,187 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b2ce7304d2,43675,1733550578392 already deleted, retry=false 2024-12-07T05:49:43,187 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b2ce7304d2,43675,1733550578392 expired; onlineServers=2 2024-12-07T05:49:43,187 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b2ce7304d2,37395,1733550578323] 2024-12-07T05:49:43,188 WARN [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:43,188 WARN [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:43,192 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1201349948_22 at /127.0.0.1:36464 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:44585:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36464 dst: /127.0.0.1:44585 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T05:49:43,195 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b2ce7304d2,37395,1733550578323 already deleted, retry=false 2024-12-07T05:49:43,195 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b2ce7304d2,37395,1733550578323 expired; onlineServers=1 2024-12-07T05:49:43,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-07T05:49:43,197 WARN [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T05:49:43,197 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/.tmp/info/49a8e13b523e4afd8d495a2c91c004fb 2024-12-07T05:49:43,218 INFO [regionserver/16b2ce7304d2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T05:49:43,222 INFO [regionserver/16b2ce7304d2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T05:49:43,222 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/.tmp/ns/36e48a5f0c9e46d58c6c051f7b449bc3 is 43, key is default/ns:d/1733550580451/Put/seqid=0 2024-12-07T05:49:43,225 WARN [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:43,225 WARN [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-07T05:49:43,225 INFO [regionserver/16b2ce7304d2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T05:49:43,228 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1201349948_22 at /127.0.0.1:36474 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:44585:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36474 dst: /127.0.0.1:44585 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T05:49:43,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-07T05:49:43,233 WARN [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T05:49:43,233 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/.tmp/ns/36e48a5f0c9e46d58c6c051f7b449bc3 2024-12-07T05:49:43,256 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/.tmp/table/92b55f1597b348289f89d7810e913935 is 52, key is TestHBaseWalOnEC/table:state/1733550581560/Put/seqid=0 2024-12-07T05:49:43,258 WARN [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:43,258 WARN [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:43,261 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1201349948_22 at /127.0.0.1:58368 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:41373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58368 dst: /127.0.0.1:41373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T05:49:43,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-07T05:49:43,265 WARN [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
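A note on the repeated DFSStripedOutputStream warnings above, as a plausible reading of this log: RS-3-2-1024k stripes each block group into 3 data blocks plus 2 parity blocks, so a complete write needs 5 distinct datanodes, while this mini cluster runs only 3 (see the StartMiniClusterOption printed near the end of this section). That would explain why parity indices 3 and 4 can never be placed, why the abandoned parity streams surface on the datanodes as "Premature EOF from inputStream", and why the client then reports "Block group <1> failed to write 2 blocks" — the data stays readable from the 3 data blocks, just without parity redundancy. The warning itself suggests 'hdfs ec -verifyClusterSetup'; the sketch below is an alternative, programmatic check. It is illustrative only: the NameNode URI is copied from the log, the class name and path are hypothetical, and it assumes the standard Hadoop 3.x DistributedFileSystem/ErasureCodingPolicy API.

```java
// Illustrative sketch (hypothetical class and path): report which erasure coding policy
// applies to a directory and how many datanodes a full block group would need.
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode URI taken from the log above; the path is a placeholder, not the test's exact dir.
    try (DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:32879"), conf)) {
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(new Path("/user/jenkins/test-data"));
      if (policy == null) {
        System.out.println("path uses plain replication, no EC policy set");
      } else {
        int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
        System.out.printf("policy=%s data=%d parity=%d -> needs %d datanodes per block group%n",
            policy.getName(), policy.getNumDataUnits(), policy.getNumParityUnits(), needed);
      }
    }
  }
}
```

Against a directory carrying this policy it would print something like "policy=RS-3-2-1024k data=3 parity=2 -> needs 5 datanodes per block group", which is the arithmetic behind the two unallocatable parity blocks above.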
2024-12-07T05:49:43,265 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/.tmp/table/92b55f1597b348289f89d7810e913935 2024-12-07T05:49:43,276 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/.tmp/info/49a8e13b523e4afd8d495a2c91c004fb as hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/info/49a8e13b523e4afd8d495a2c91c004fb 2024-12-07T05:49:43,279 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:43,279 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43675-0x101afa027650003, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:43,280 INFO [RS:1;16b2ce7304d2:37395 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T05:49:43,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:43,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37395-0x101afa027650002, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:43,280 INFO [RS:1;16b2ce7304d2:37395 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b2ce7304d2,37395,1733550578323; zookeeper connection closed. 2024-12-07T05:49:43,280 INFO [RS:2;16b2ce7304d2:43675 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T05:49:43,280 INFO [RS:2;16b2ce7304d2:43675 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b2ce7304d2,43675,1733550578392; zookeeper connection closed. 
2024-12-07T05:49:43,280 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4a00a0fe {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4a00a0fe 2024-12-07T05:49:43,280 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@67a6a905 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@67a6a905 2024-12-07T05:49:43,286 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/info/49a8e13b523e4afd8d495a2c91c004fb, entries=10, sequenceid=11, filesize=6.5 K 2024-12-07T05:49:43,288 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/.tmp/ns/36e48a5f0c9e46d58c6c051f7b449bc3 as hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/ns/36e48a5f0c9e46d58c6c051f7b449bc3 2024-12-07T05:49:43,297 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/ns/36e48a5f0c9e46d58c6c051f7b449bc3, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T05:49:43,299 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/.tmp/table/92b55f1597b348289f89d7810e913935 as hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/table/92b55f1597b348289f89d7810e913935 2024-12-07T05:49:43,309 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/table/92b55f1597b348289f89d7810e913935, entries=2, sequenceid=11, filesize=5.1 K 2024-12-07T05:49:43,311 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 163ms, sequenceid=11, compaction requested=false 2024-12-07T05:49:43,311 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T05:49:43,319 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T05:49:43,320 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T05:49:43,320 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-12-07T05:49:43,321 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733550583147Running coprocessor pre-close hooks at 1733550583147Disabling compacts and flushes for region at 1733550583147Disabling writes for close at 1733550583148 (+1 ms)Obtaining lock to block concurrent updates at 1733550583148Preparing flush snapshotting stores in 1588230740 at 1733550583148Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733550583149 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733550583150 (+1 ms)Flushing 1588230740/info: creating writer at 1733550583151 (+1 ms)Flushing 1588230740/info: appending metadata at 1733550583180 (+29 ms)Flushing 1588230740/info: closing flushed file at 1733550583180Flushing 1588230740/ns: creating writer at 1733550583207 (+27 ms)Flushing 1588230740/ns: appending metadata at 1733550583222 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733550583222Flushing 1588230740/table: creating writer at 1733550583241 (+19 ms)Flushing 1588230740/table: appending metadata at 1733550583255 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733550583255Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bc28c2e: reopening flushed file at 1733550583275 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3cf8e0de: reopening flushed file at 1733550583286 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@461bc359: reopening flushed file at 1733550583298 (+12 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 163ms, sequenceid=11, compaction requested=false at 1733550583311 (+13 ms)Writing region close event to WAL at 1733550583313 (+2 ms)Running coprocessor post-close hooks at 1733550583320 (+7 ms)Closed at 1733550583320 2024-12-07T05:49:43,321 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T05:49:43,348 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(976): stopping server 16b2ce7304d2,45785,1733550578218; all regions closed. 
2024-12-07T05:49:43,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741829_1019 (size=2751) 2024-12-07T05:49:43,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741829_1019 (size=2751) 2024-12-07T05:49:43,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_1073741829_1019 (size=2751) 2024-12-07T05:49:43,359 DEBUG [RS:0;16b2ce7304d2:45785 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/oldWALs 2024-12-07T05:49:43,359 INFO [RS:0;16b2ce7304d2:45785 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 16b2ce7304d2%2C45785%2C1733550578218.meta:.meta(num 1733550580281) 2024-12-07T05:49:43,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741826_1016 (size=1298) 2024-12-07T05:49:43,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741826_1016 (size=1298) 2024-12-07T05:49:43,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_1073741826_1016 (size=1298) 2024-12-07T05:49:43,366 DEBUG [RS:0;16b2ce7304d2:45785 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/oldWALs 2024-12-07T05:49:43,366 INFO [RS:0;16b2ce7304d2:45785 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 16b2ce7304d2%2C45785%2C1733550578218:(num 1733550579870) 2024-12-07T05:49:43,366 DEBUG [RS:0;16b2ce7304d2:45785 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:43,366 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T05:49:43,366 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T05:49:43,367 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.ChoreService(370): Chore service for: regionserver/16b2ce7304d2:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T05:49:43,367 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T05:49:43,367 INFO [regionserver/16b2ce7304d2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T05:49:43,367 INFO [RS:0;16b2ce7304d2:45785 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45785 2024-12-07T05:49:43,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b2ce7304d2,45785,1733550578218 2024-12-07T05:49:43,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T05:49:43,387 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T05:49:43,387 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007fa9c08f4f20@2b91ded9 rejected from java.util.concurrent.ThreadPoolExecutor@2fae5f73[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-07T05:49:43,395 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b2ce7304d2,45785,1733550578218] 2024-12-07T05:49:43,404 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b2ce7304d2,45785,1733550578218 already deleted, retry=false 2024-12-07T05:49:43,404 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b2ce7304d2,45785,1733550578218 expired; onlineServers=0 2024-12-07T05:49:43,404 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '16b2ce7304d2,34827,1733550577570' ***** 2024-12-07T05:49:43,404 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T05:49:43,404 INFO [M:0;16b2ce7304d2:34827 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T05:49:43,404 INFO [M:0;16b2ce7304d2:34827 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T05:49:43,404 DEBUG [M:0;16b2ce7304d2:34827 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T05:49:43,404 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
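The "Error while calling watcher ... RejectedExecutionException" entries above (one from pool-77-thread-1-EventThread earlier, one from Time-limited test-EventThread here) read as a shutdown race rather than a real failure: ZooKeeper still delivers a NodeDeleted/Closed event after the ZKWatcher's internal executor has been shut down, so the submission is refused by the pool's abort policy. The minimal, self-contained sketch below reproduces just that Java-level mechanism; it is not HBase code.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

public class RejectAfterShutdown {
  public static void main(String[] args) {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    pool.shutdown();                      // executor begins terminating, as during cluster stop
    try {
      pool.execute(() -> System.out.println("late event")); // a "watcher event" arriving too late
    } catch (RejectedExecutionException e) {
      // The default AbortPolicy rejects tasks submitted after shutdown -- the same exception
      // the ZKWatcher logs when a ZooKeeper event lands after its executor has stopped.
      System.out.println("rejected: " + e);
    }
  }
}
```

In the log the rejected task is the TraceUtil lambda that would have dispatched the watcher event, which is why the message names a ThreadPoolExecutor already in the Terminated or Shutting down state.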
2024-12-07T05:49:43,404 DEBUG [M:0;16b2ce7304d2:34827 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T05:49:43,404 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster-HFileCleaner.small.0-1733550579459 {}] cleaner.HFileCleaner(306): Exit Thread[master/16b2ce7304d2:0:becomeActiveMaster-HFileCleaner.small.0-1733550579459,5,FailOnTimeoutGroup] 2024-12-07T05:49:43,404 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster-HFileCleaner.large.0-1733550579454 {}] cleaner.HFileCleaner(306): Exit Thread[master/16b2ce7304d2:0:becomeActiveMaster-HFileCleaner.large.0-1733550579454,5,FailOnTimeoutGroup] 2024-12-07T05:49:43,405 INFO [M:0;16b2ce7304d2:34827 {}] hbase.ChoreService(370): Chore service for: master/16b2ce7304d2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T05:49:43,405 INFO [M:0;16b2ce7304d2:34827 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T05:49:43,405 DEBUG [M:0;16b2ce7304d2:34827 {}] master.HMaster(1795): Stopping service threads 2024-12-07T05:49:43,405 INFO [M:0;16b2ce7304d2:34827 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T05:49:43,405 INFO [M:0;16b2ce7304d2:34827 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T05:49:43,406 INFO [M:0;16b2ce7304d2:34827 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T05:49:43,406 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T05:49:43,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T05:49:43,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:43,412 DEBUG [M:0;16b2ce7304d2:34827 {}] zookeeper.ZKUtil(347): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T05:49:43,413 WARN [M:0;16b2ce7304d2:34827 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T05:49:43,414 INFO [M:0;16b2ce7304d2:34827 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/.lastflushedseqids 2024-12-07T05:49:43,425 WARN [M:0;16b2ce7304d2:34827 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:43,425 WARN [M:0;16b2ce7304d2:34827 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-07T05:49:43,427 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1395986970_22 at /127.0.0.1:36490 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:44585:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36490 dst: /127.0.0.1:44585 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T05:49:43,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-07T05:49:43,431 WARN [M:0;16b2ce7304d2:34827 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T05:49:43,431 INFO [M:0;16b2ce7304d2:34827 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T05:49:43,431 INFO [M:0;16b2ce7304d2:34827 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T05:49:43,431 DEBUG [M:0;16b2ce7304d2:34827 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T05:49:43,431 INFO [M:0;16b2ce7304d2:34827 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T05:49:43,431 DEBUG [M:0;16b2ce7304d2:34827 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T05:49:43,431 DEBUG [M:0;16b2ce7304d2:34827 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T05:49:43,431 DEBUG [M:0;16b2ce7304d2:34827 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T05:49:43,432 INFO [M:0;16b2ce7304d2:34827 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-07T05:49:43,452 DEBUG [M:0;16b2ce7304d2:34827 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a3c3476ed90641fa83d3e589d7c3ebbe is 82, key is hbase:meta,,1/info:regioninfo/1733550580357/Put/seqid=0 2024-12-07T05:49:43,454 WARN [M:0;16b2ce7304d2:34827 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:43,454 WARN [M:0;16b2ce7304d2:34827 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:43,459 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1395986970_22 at /127.0.0.1:55436 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:37163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55436 dst: /127.0.0.1:37163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T05:49:43,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-07T05:49:43,463 WARN [M:0;16b2ce7304d2:34827 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T05:49:43,463 INFO [M:0;16b2ce7304d2:34827 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a3c3476ed90641fa83d3e589d7c3ebbe 2024-12-07T05:49:43,487 DEBUG [M:0;16b2ce7304d2:34827 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4afe2aa5663d4a6492fb90218971cf6a is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733550581568/Put/seqid=0 2024-12-07T05:49:43,490 WARN [M:0;16b2ce7304d2:34827 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:43,490 WARN [M:0;16b2ce7304d2:34827 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:43,492 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1395986970_22 at /127.0.0.1:55462 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:37163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55462 dst: /127.0.0.1:37163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T05:49:43,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:43,496 INFO [RS:0;16b2ce7304d2:45785 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T05:49:43,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x101afa027650001, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:43,496 INFO [RS:0;16b2ce7304d2:45785 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b2ce7304d2,45785,1733550578218; zookeeper connection closed. 2024-12-07T05:49:43,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_-9223372036854775552_1037 (size=6438) 2024-12-07T05:49:43,496 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4d2e6a8b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4d2e6a8b 2024-12-07T05:49:43,497 WARN [M:0;16b2ce7304d2:34827 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T05:49:43,497 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-07T05:49:43,497 INFO [M:0;16b2ce7304d2:34827 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4afe2aa5663d4a6492fb90218971cf6a 2024-12-07T05:49:43,519 DEBUG [M:0;16b2ce7304d2:34827 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bf841ff183e94572a6aae4b887980cf5 is 69, key is 16b2ce7304d2,37395,1733550578323/rs:state/1733550579583/Put/seqid=0 2024-12-07T05:49:43,521 WARN [M:0;16b2ce7304d2:34827 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:43,521 WARN [M:0;16b2ce7304d2:34827 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T05:49:43,524 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1395986970_22 at /127.0.0.1:36516 [Receiving block BP-779431921-172.17.0.2-1733550573781:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:44585:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36516 dst: /127.0.0.1:44585 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T05:49:43,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-07T05:49:43,530 WARN [M:0;16b2ce7304d2:34827 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T05:49:43,531 INFO [M:0;16b2ce7304d2:34827 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bf841ff183e94572a6aae4b887980cf5 2024-12-07T05:49:43,539 DEBUG [M:0;16b2ce7304d2:34827 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a3c3476ed90641fa83d3e589d7c3ebbe as hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a3c3476ed90641fa83d3e589d7c3ebbe 2024-12-07T05:49:43,547 INFO [M:0;16b2ce7304d2:34827 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a3c3476ed90641fa83d3e589d7c3ebbe, entries=8, sequenceid=72, filesize=5.5 K 2024-12-07T05:49:43,549 DEBUG [M:0;16b2ce7304d2:34827 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4afe2aa5663d4a6492fb90218971cf6a as hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4afe2aa5663d4a6492fb90218971cf6a 2024-12-07T05:49:43,559 INFO [M:0;16b2ce7304d2:34827 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4afe2aa5663d4a6492fb90218971cf6a, entries=8, sequenceid=72, filesize=6.3 K 2024-12-07T05:49:43,560 DEBUG [M:0;16b2ce7304d2:34827 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bf841ff183e94572a6aae4b887980cf5 as hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bf841ff183e94572a6aae4b887980cf5 2024-12-07T05:49:43,567 INFO [M:0;16b2ce7304d2:34827 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bf841ff183e94572a6aae4b887980cf5, entries=3, sequenceid=72, filesize=5.2 K 2024-12-07T05:49:43,569 INFO [M:0;16b2ce7304d2:34827 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=72, compaction requested=false 2024-12-07T05:49:43,570 INFO [M:0;16b2ce7304d2:34827 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T05:49:43,570 DEBUG [M:0;16b2ce7304d2:34827 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733550583431Disabling compacts and flushes for region at 1733550583431Disabling writes for close at 1733550583431Obtaining lock to block concurrent updates at 1733550583432 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733550583432Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733550583432Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733550583433 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733550583433Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733550583452 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733550583452Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733550583470 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733550583487 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733550583487Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733550583504 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733550583519 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733550583519Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@127b425: reopening flushed file at 1733550583538 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@384eb1fd: reopening flushed file at 1733550583547 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@178ba908: reopening flushed file at 1733550583559 (+12 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=72, compaction requested=false at 1733550583569 (+10 ms)Writing region close event to WAL at 1733550583570 (+1 ms)Closed at 1733550583570 2024-12-07T05:49:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741825_1011 (size=32662) 2024-12-07T05:49:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44585 is added to blk_1073741825_1011 (size=32662) 2024-12-07T05:49:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37163 is added to blk_1073741825_1011 (size=32662) 2024-12-07T05:49:43,574 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T05:49:43,574 INFO [M:0;16b2ce7304d2:34827 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-07T05:49:43,574 INFO [M:0;16b2ce7304d2:34827 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34827 2024-12-07T05:49:43,575 INFO [M:0;16b2ce7304d2:34827 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T05:49:43,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:43,696 INFO [M:0;16b2ce7304d2:34827 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T05:49:43,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101afa027650000, quorum=127.0.0.1:52563, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:43,732 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@653e6301{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T05:49:43,736 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@404caff2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T05:49:43,736 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T05:49:43,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@343b36c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T05:49:43,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35e2f174{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/hadoop.log.dir/,STOPPED} 2024-12-07T05:49:43,739 WARN [BP-779431921-172.17.0.2-1733550573781 heartbeating to localhost/127.0.0.1:32879 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T05:49:43,739 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T05:49:43,739 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T05:49:43,739 WARN [BP-779431921-172.17.0.2-1733550573781 heartbeating to localhost/127.0.0.1:32879 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-779431921-172.17.0.2-1733550573781 (Datanode Uuid 46869fc1-8536-4da5-a0db-c9c97f51bb0f) service to localhost/127.0.0.1:32879 2024-12-07T05:49:43,740 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/data/data5/current/BP-779431921-172.17.0.2-1733550573781 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T05:49:43,740 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/data/data6/current/BP-779431921-172.17.0.2-1733550573781 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T05:49:43,741 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T05:49:43,742 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@513cab2c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T05:49:43,743 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29a123ec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T05:49:43,743 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T05:49:43,743 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6af5a446{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T05:49:43,743 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@444b27d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/hadoop.log.dir/,STOPPED} 2024-12-07T05:49:43,744 WARN [BP-779431921-172.17.0.2-1733550573781 heartbeating to localhost/127.0.0.1:32879 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T05:49:43,745 WARN [BP-779431921-172.17.0.2-1733550573781 heartbeating to localhost/127.0.0.1:32879 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-779431921-172.17.0.2-1733550573781 (Datanode Uuid c2b4d2a4-956e-4cc5-a019-01bce9f9e722) service to localhost/127.0.0.1:32879 2024-12-07T05:49:43,745 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/data/data3/current/BP-779431921-172.17.0.2-1733550573781 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T05:49:43,745 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/data/data4/current/BP-779431921-172.17.0.2-1733550573781 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T05:49:43,746 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-07T05:49:43,746 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T05:49:43,746 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T05:49:43,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65462677{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T05:49:43,752 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@383014b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T05:49:43,752 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T05:49:43,752 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dc262e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T05:49:43,752 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@431e53b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/hadoop.log.dir/,STOPPED} 2024-12-07T05:49:43,753 WARN [BP-779431921-172.17.0.2-1733550573781 heartbeating to localhost/127.0.0.1:32879 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T05:49:43,753 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T05:49:43,753 WARN [BP-779431921-172.17.0.2-1733550573781 heartbeating to localhost/127.0.0.1:32879 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-779431921-172.17.0.2-1733550573781 (Datanode Uuid f81be1d2-0b8b-4847-aa83-c251aa3bed14) service to localhost/127.0.0.1:32879 2024-12-07T05:49:43,753 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T05:49:43,754 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/data/data1/current/BP-779431921-172.17.0.2-1733550573781 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T05:49:43,754 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/cluster_0061e185-2488-1e51-61a4-aa5a92de8580/data/data2/current/BP-779431921-172.17.0.2-1733550573781 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T05:49:43,754 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T05:49:43,762 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58dbf239{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T05:49:43,763 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13e2962d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T05:49:43,763 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T05:49:43,763 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f93dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T05:49:43,763 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4395d44b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/hadoop.log.dir/,STOPPED} 2024-12-07T05:49:43,770 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T05:49:43,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T05:49:43,801 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=89 (was 159), OpenFileDescriptor=447 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=253 (was 197) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=8216 (was 8490) 2024-12-07T05:49:43,807 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=89, OpenFileDescriptor=447, MaxFileDescriptor=1048576, SystemLoadAverage=253, ProcessCount=11, AvailableMemoryMB=8216 2024-12-07T05:49:43,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T05:49:43,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/hadoop.log.dir so I do NOT create it in target/test-data/44c44ce9-788d-29d3-4c40-67853e241664 2024-12-07T05:49:43,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6254c60a-17a2-d1eb-2791-88b46c3e8eb6/hadoop.tmp.dir so I do NOT create it in target/test-data/44c44ce9-788d-29d3-4c40-67853e241664 2024-12-07T05:49:43,807 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17, deleteOnExit=true 2024-12-07T05:49:43,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T05:49:43,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/test.cache.data in system properties and HBase conf 2024-12-07T05:49:43,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T05:49:43,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/hadoop.log.dir in system properties and HBase conf 2024-12-07T05:49:43,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T05:49:43,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T05:49:43,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T05:49:43,808 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T05:49:43,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T05:49:43,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T05:49:43,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T05:49:43,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T05:49:43,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T05:49:43,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T05:49:43,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T05:49:43,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T05:49:43,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T05:49:43,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/nfs.dump.dir in system properties and HBase conf 2024-12-07T05:49:43,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/java.io.tmpdir in system properties and HBase conf 2024-12-07T05:49:43,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T05:49:43,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T05:49:43,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T05:49:44,121 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T05:49:44,125 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T05:49:44,126 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T05:49:44,126 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T05:49:44,126 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T05:49:44,127 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T05:49:44,127 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d772bf2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/hadoop.log.dir/,AVAILABLE} 2024-12-07T05:49:44,127 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a627a74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T05:49:44,216 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b0c25ba{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/java.io.tmpdir/jetty-localhost-35155-hadoop-hdfs-3_4_1-tests_jar-_-any-5702192343886671530/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T05:49:44,217 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a8e73dc{HTTP/1.1, (http/1.1)}{localhost:35155} 2024-12-07T05:49:44,217 INFO [Time-limited test {}] server.Server(415): Started @12028ms 2024-12-07T05:49:44,419 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T05:49:44,422 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T05:49:44,422 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T05:49:44,422 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T05:49:44,422 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T05:49:44,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2716dd5e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/hadoop.log.dir/,AVAILABLE} 2024-12-07T05:49:44,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47d171d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T05:49:44,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@501c8baa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/java.io.tmpdir/jetty-localhost-34891-hadoop-hdfs-3_4_1-tests_jar-_-any-15282804065427715764/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T05:49:44,515 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@25be713d{HTTP/1.1, (http/1.1)}{localhost:34891} 2024-12-07T05:49:44,515 INFO [Time-limited test {}] server.Server(415): Started @12326ms 2024-12-07T05:49:44,516 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T05:49:44,545 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T05:49:44,547 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T05:49:44,548 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T05:49:44,548 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T05:49:44,548 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T05:49:44,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ef06ee6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/hadoop.log.dir/,AVAILABLE} 2024-12-07T05:49:44,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3cdd19d5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T05:49:44,638 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d7cac9e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/java.io.tmpdir/jetty-localhost-41703-hadoop-hdfs-3_4_1-tests_jar-_-any-16867661729320005917/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T05:49:44,639 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@785c29e1{HTTP/1.1, (http/1.1)}{localhost:41703} 2024-12-07T05:49:44,639 INFO [Time-limited test {}] server.Server(415): Started @12450ms 2024-12-07T05:49:44,640 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T05:49:44,683 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T05:49:44,687 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T05:49:44,688 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T05:49:44,688 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T05:49:44,688 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T05:49:44,689 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78faf64e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/hadoop.log.dir/,AVAILABLE} 2024-12-07T05:49:44,689 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16e08e17{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T05:49:44,781 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74fb60c3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/java.io.tmpdir/jetty-localhost-32951-hadoop-hdfs-3_4_1-tests_jar-_-any-7268974757267259239/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T05:49:44,782 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1380d9e3{HTTP/1.1, (http/1.1)}{localhost:32951} 2024-12-07T05:49:44,782 INFO [Time-limited test {}] server.Server(415): Started @12593ms 2024-12-07T05:49:44,784 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T05:49:45,344 WARN [Thread-567 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/data/data2/current/BP-788461391-172.17.0.2-1733550583832/current, will proceed with Du for space computation calculation, 2024-12-07T05:49:45,344 WARN [Thread-566 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/data/data1/current/BP-788461391-172.17.0.2-1733550583832/current, will proceed with Du for space computation calculation, 2024-12-07T05:49:45,360 WARN [Thread-507 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T05:49:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc30c8994c10ef9c0 with lease ID 0x3ea6279e93c7e2e3: Processing first storage report for DS-7f03b68d-18fb-472f-a5a2-8ee2e852f6b5 from datanode DatanodeRegistration(127.0.0.1:42613, datanodeUuid=4c20cca7-e729-43ff-ac15-eb243209ffe9, infoPort=40363, infoSecurePort=0, ipcPort=43515, storageInfo=lv=-57;cid=testClusterID;nsid=1529157707;c=1733550583832) 2024-12-07T05:49:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc30c8994c10ef9c0 with lease ID 0x3ea6279e93c7e2e3: from storage DS-7f03b68d-18fb-472f-a5a2-8ee2e852f6b5 node DatanodeRegistration(127.0.0.1:42613, datanodeUuid=4c20cca7-e729-43ff-ac15-eb243209ffe9, infoPort=40363, infoSecurePort=0, ipcPort=43515, storageInfo=lv=-57;cid=testClusterID;nsid=1529157707;c=1733550583832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T05:49:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc30c8994c10ef9c0 with lease ID 0x3ea6279e93c7e2e3: Processing first storage report for DS-1f7f99be-56fb-4446-906b-51343a2c93a5 from datanode DatanodeRegistration(127.0.0.1:42613, datanodeUuid=4c20cca7-e729-43ff-ac15-eb243209ffe9, infoPort=40363, infoSecurePort=0, ipcPort=43515, storageInfo=lv=-57;cid=testClusterID;nsid=1529157707;c=1733550583832) 2024-12-07T05:49:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc30c8994c10ef9c0 with lease ID 0x3ea6279e93c7e2e3: from storage DS-1f7f99be-56fb-4446-906b-51343a2c93a5 node DatanodeRegistration(127.0.0.1:42613, datanodeUuid=4c20cca7-e729-43ff-ac15-eb243209ffe9, infoPort=40363, infoSecurePort=0, ipcPort=43515, storageInfo=lv=-57;cid=testClusterID;nsid=1529157707;c=1733550583832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T05:49:45,502 WARN [Thread-578 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/data/data3/current/BP-788461391-172.17.0.2-1733550583832/current, will proceed with Du for space computation calculation, 2024-12-07T05:49:45,502 WARN [Thread-579 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/data/data4/current/BP-788461391-172.17.0.2-1733550583832/current, will proceed with Du for space computation calculation, 2024-12-07T05:49:45,518 WARN [Thread-530 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T05:49:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8e86aaa9eae6e5a2 with lease ID 0x3ea6279e93c7e2e4: Processing first storage report for DS-696f1f77-61e7-4b7f-aef7-bacd0b0571b7 from datanode DatanodeRegistration(127.0.0.1:36487, datanodeUuid=3a8be341-97f1-4a7a-b50f-0bb7383ec952, infoPort=46005, infoSecurePort=0, ipcPort=33373, storageInfo=lv=-57;cid=testClusterID;nsid=1529157707;c=1733550583832) 2024-12-07T05:49:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e86aaa9eae6e5a2 with lease ID 0x3ea6279e93c7e2e4: from storage DS-696f1f77-61e7-4b7f-aef7-bacd0b0571b7 node DatanodeRegistration(127.0.0.1:36487, datanodeUuid=3a8be341-97f1-4a7a-b50f-0bb7383ec952, infoPort=46005, infoSecurePort=0, ipcPort=33373, storageInfo=lv=-57;cid=testClusterID;nsid=1529157707;c=1733550583832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T05:49:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8e86aaa9eae6e5a2 with lease ID 0x3ea6279e93c7e2e4: Processing first storage report for DS-64d068fa-3e5b-485d-9884-25942578ccb2 from datanode DatanodeRegistration(127.0.0.1:36487, datanodeUuid=3a8be341-97f1-4a7a-b50f-0bb7383ec952, infoPort=46005, infoSecurePort=0, ipcPort=33373, storageInfo=lv=-57;cid=testClusterID;nsid=1529157707;c=1733550583832) 2024-12-07T05:49:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e86aaa9eae6e5a2 with lease ID 0x3ea6279e93c7e2e4: from storage DS-64d068fa-3e5b-485d-9884-25942578ccb2 node DatanodeRegistration(127.0.0.1:36487, datanodeUuid=3a8be341-97f1-4a7a-b50f-0bb7383ec952, infoPort=46005, infoSecurePort=0, ipcPort=33373, storageInfo=lv=-57;cid=testClusterID;nsid=1529157707;c=1733550583832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T05:49:45,603 WARN [Thread-589 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/data/data5/current/BP-788461391-172.17.0.2-1733550583832/current, will proceed with Du for space computation calculation, 2024-12-07T05:49:45,603 WARN [Thread-590 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/data/data6/current/BP-788461391-172.17.0.2-1733550583832/current, will proceed with Du for space computation calculation, 2024-12-07T05:49:45,624 WARN [Thread-552 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T05:49:45,627 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7788885229032654 with lease ID 0x3ea6279e93c7e2e5: Processing first storage report for DS-37b61a33-5e1c-41df-8c6b-e2e665b17d36 from datanode DatanodeRegistration(127.0.0.1:42355, datanodeUuid=4cf1243f-7a6b-4bb1-a0b8-1ad35764c656, infoPort=40781, infoSecurePort=0, ipcPort=33187, storageInfo=lv=-57;cid=testClusterID;nsid=1529157707;c=1733550583832) 2024-12-07T05:49:45,628 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7788885229032654 with lease ID 0x3ea6279e93c7e2e5: from storage DS-37b61a33-5e1c-41df-8c6b-e2e665b17d36 node DatanodeRegistration(127.0.0.1:42355, datanodeUuid=4cf1243f-7a6b-4bb1-a0b8-1ad35764c656, infoPort=40781, infoSecurePort=0, ipcPort=33187, storageInfo=lv=-57;cid=testClusterID;nsid=1529157707;c=1733550583832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T05:49:45,628 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7788885229032654 with lease ID 0x3ea6279e93c7e2e5: Processing first storage report for DS-24a15f76-a662-4f37-b2b7-39a80ba3ac7e from datanode DatanodeRegistration(127.0.0.1:42355, datanodeUuid=4cf1243f-7a6b-4bb1-a0b8-1ad35764c656, infoPort=40781, infoSecurePort=0, ipcPort=33187, storageInfo=lv=-57;cid=testClusterID;nsid=1529157707;c=1733550583832) 2024-12-07T05:49:45,628 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7788885229032654 with lease ID 0x3ea6279e93c7e2e5: from storage DS-24a15f76-a662-4f37-b2b7-39a80ba3ac7e node DatanodeRegistration(127.0.0.1:42355, datanodeUuid=4cf1243f-7a6b-4bb1-a0b8-1ad35764c656, infoPort=40781, infoSecurePort=0, ipcPort=33187, storageInfo=lv=-57;cid=testClusterID;nsid=1529157707;c=1733550583832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T05:49:45,723 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664 2024-12-07T05:49:45,726 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/zookeeper_0, clientPort=64227, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T05:49:45,727 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64227 2024-12-07T05:49:45,727 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:45,729 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:45,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741825_1001 (size=7) 2024-12-07T05:49:45,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741825_1001 (size=7) 2024-12-07T05:49:45,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741825_1001 (size=7) 2024-12-07T05:49:45,744 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8 with version=8 2024-12-07T05:49:45,744 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:32879/user/jenkins/test-data/ead4b57f-f2f8-e3b0-3de7-9b12170920ed/hbase-staging 2024-12-07T05:49:45,746 INFO [Time-limited test {}] client.ConnectionUtils(128): master/16b2ce7304d2:0 server-side Connection retries=45 2024-12-07T05:49:45,746 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:45,746 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:45,746 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T05:49:45,746 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:45,746 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T05:49:45,746 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T05:49:45,746 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T05:49:45,747 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36221 2024-12-07T05:49:45,748 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36221 connecting to ZooKeeper ensemble=127.0.0.1:64227 2024-12-07T05:49:45,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:362210x0, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T05:49:45,795 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36221-0x101afa04a420000 connected 2024-12-07T05:49:45,824 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T05:49:45,862 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:45,863 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:45,865 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:45,865 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8, hbase.cluster.distributed=false 2024-12-07T05:49:45,867 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T05:49:45,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36221 2024-12-07T05:49:45,873 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36221 2024-12-07T05:49:45,874 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36221 2024-12-07T05:49:45,874 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T05:49:45,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36221 2024-12-07T05:49:45,875 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T05:49:45,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36221 2024-12-07T05:49:45,889 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b2ce7304d2:0 server-side Connection retries=45 2024-12-07T05:49:45,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:45,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:45,889 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T05:49:45,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:45,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T05:49:45,889 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T05:49:45,889 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T05:49:45,890 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46815 2024-12-07T05:49:45,891 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46815 connecting to ZooKeeper ensemble=127.0.0.1:64227 2024-12-07T05:49:45,892 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:45,894 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:45,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:468150x0, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T05:49:45,904 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:468150x0, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:45,904 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46815-0x101afa04a420001 connected 2024-12-07T05:49:45,904 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T05:49:45,905 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T05:49:45,906 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T05:49:45,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T05:49:45,907 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46815 2024-12-07T05:49:45,907 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46815 2024-12-07T05:49:45,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46815 2024-12-07T05:49:45,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46815 2024-12-07T05:49:45,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46815 2024-12-07T05:49:45,922 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b2ce7304d2:0 server-side Connection retries=45 2024-12-07T05:49:45,922 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:45,922 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:45,922 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T05:49:45,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:45,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T05:49:45,923 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T05:49:45,923 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T05:49:45,923 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35779 2024-12-07T05:49:45,924 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35779 connecting to ZooKeeper ensemble=127.0.0.1:64227 2024-12-07T05:49:45,925 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:45,926 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
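From 05:49:43,807 onward the log shows a fresh minicluster being brought up for regionserver.wal.TestHBaseWalOnEC#testReadWrite[1]: DFS with three datanodes, a MiniZooKeeperCluster on clientPort=64227, one master bound to port 36221, and region server RPC endpoints (46815 and 35779 so far). A test normally requests exactly that topology through a single startMiniCluster call. The sketch below is a minimal illustration of that call using the HBase 2.x class names HBaseTestingUtility and StartMiniClusterOption; the branch producing this log uses the renamed HBaseTestingUtil, so treat the exact class names as assumptions rather than what this test compiles against.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Same shape as the option string logged above:
    // 1 master, 3 region servers, 3 datanodes, 1 ZooKeeper server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // starts DFS, ZooKeeper, the master and the region servers
    try {
      // ... run test assertions against util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();    // drives the "Minicluster is down" teardown seen earlier in the log
    }
  }
}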
2024-12-07T05:49:45,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:357790x0, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T05:49:45,937 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35779-0x101afa04a420002 connected 2024-12-07T05:49:45,937 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:45,938 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T05:49:45,939 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T05:49:45,939 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T05:49:45,941 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T05:49:45,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35779 2024-12-07T05:49:45,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35779 2024-12-07T05:49:45,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35779 2024-12-07T05:49:45,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35779 2024-12-07T05:49:45,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35779 2024-12-07T05:49:45,957 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b2ce7304d2:0 server-side Connection retries=45 2024-12-07T05:49:45,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:45,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:45,957 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T05:49:45,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T05:49:45,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T05:49:45,957 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, 
hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T05:49:45,957 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T05:49:45,958 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41867 2024-12-07T05:49:45,959 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41867 connecting to ZooKeeper ensemble=127.0.0.1:64227 2024-12-07T05:49:45,960 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:45,961 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:45,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:418670x0, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T05:49:45,970 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41867-0x101afa04a420003 connected 2024-12-07T05:49:45,971 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:45,971 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T05:49:45,971 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T05:49:45,972 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T05:49:45,973 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T05:49:45,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41867 2024-12-07T05:49:45,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41867 2024-12-07T05:49:45,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41867 2024-12-07T05:49:45,980 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41867 2024-12-07T05:49:45,980 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41867 2024-12-07T05:49:45,989 DEBUG [M:0;16b2ce7304d2:36221 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;16b2ce7304d2:36221 2024-12-07T05:49:45,989 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/16b2ce7304d2,36221,1733550585746 2024-12-07T05:49:45,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, 
quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:45,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:45,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:45,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:45,996 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/16b2ce7304d2,36221,1733550585746 2024-12-07T05:49:46,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T05:49:46,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T05:49:46,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T05:49:46,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,004 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T05:49:46,005 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/16b2ce7304d2,36221,1733550585746 from backup master directory 2024-12-07T05:49:46,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, 
quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:46,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/16b2ce7304d2,36221,1733550585746 2024-12-07T05:49:46,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:46,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:46,012 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T05:49:46,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T05:49:46,012 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=16b2ce7304d2,36221,1733550585746 2024-12-07T05:49:46,019 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/hbase.id] with ID: 56c1930c-71e8-422f-9b3d-53dc0f52ddb2 2024-12-07T05:49:46,019 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/.tmp/hbase.id 2024-12-07T05:49:46,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741826_1002 (size=42) 2024-12-07T05:49:46,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741826_1002 (size=42) 2024-12-07T05:49:46,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741826_1002 (size=42) 2024-12-07T05:49:46,028 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/.tmp/hbase.id]:[hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/hbase.id] 2024-12-07T05:49:46,044 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T05:49:46,045 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T05:49:46,046 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
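The two FSUtils entries at 05:49:46,019 and 05:49:46,028 describe the cluster ID being written first under .tmp/hbase.id and then moved to its final hbase.id location under hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8. Below is a simplified sketch of that write-to-temp-then-rename pattern on the Hadoop FileSystem API; it is illustrative only and does not reproduce FSUtils itself (in particular, the real file stores a serialized ClusterId rather than a bare UUID string).

import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdSketch {
  public static void main(String[] args) throws Exception {
    Path rootDir = new Path("hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8");
    FileSystem fs = rootDir.getFileSystem(new Configuration());
    Path tmpId = new Path(rootDir, ".tmp/hbase.id");
    Path finalId = new Path(rootDir, "hbase.id");

    // Write the ID to a temporary location first (simplified: a plain UUID string).
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // Then rename it into its final location, the step the log describes as
    // "Move the temporary cluster ID file to its target location".
    if (!fs.rename(tmpId, finalId)) {
      throw new java.io.IOException("rename failed: " + tmpId + " -> " + finalId);
    }
  }
}

The point of the two-step write is that readers of hbase.id never observe a partially written file; the ID only appears at the final path once the rename succeeds.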
2024-12-07T05:49:46,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741827_1003 (size=196) 2024-12-07T05:49:46,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741827_1003 (size=196) 2024-12-07T05:49:46,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741827_1003 (size=196) 2024-12-07T05:49:46,065 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T05:49:46,066 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T05:49:46,066 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T05:49:46,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is 
added to blk_1073741828_1004 (size=1189) 2024-12-07T05:49:46,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741828_1004 (size=1189) 2024-12-07T05:49:46,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741828_1004 (size=1189) 2024-12-07T05:49:46,079 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store 2024-12-07T05:49:46,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741829_1005 (size=34) 2024-12-07T05:49:46,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741829_1005 (size=34) 2024-12-07T05:49:46,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741829_1005 (size=34) 2024-12-07T05:49:46,092 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T05:49:46,092 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T05:49:46,092 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T05:49:46,092 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
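The master:store descriptor spelled out above is built internally by MasterRegion, but the per-family knobs it prints (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE, IN_MEMORY, ...) are the same ones exposed for user tables through the public client builders. A sketch with settings mirroring the 'info' family above; the table name "demo" is made up for illustration and nothing here reproduces the internal master:store code path:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: a user-level table descriptor with settings analogous to the 'info'
    // family logged above (3 versions, ROWCOL bloom, ROW_INDEX_V1 encoding,
    // 8 KB blocks, in-memory). Table name "demo" is hypothetical.
    public class DescriptorSketch {
        public static TableDescriptor build() {
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .setInMemory(true)
                .build();
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(info)
                .build();
        }
    }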
2024-12-07T05:49:46,092 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T05:49:46,092 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T05:49:46,093 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T05:49:46,093 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733550586092Disabling compacts and flushes for region at 1733550586092Disabling writes for close at 1733550586092Writing region close event to WAL at 1733550586092Closed at 1733550586092 2024-12-07T05:49:46,094 WARN [master/16b2ce7304d2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/.initializing 2024-12-07T05:49:46,094 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/WALs/16b2ce7304d2,36221,1733550585746 2024-12-07T05:49:46,097 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b2ce7304d2%2C36221%2C1733550585746, suffix=, logDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/WALs/16b2ce7304d2,36221,1733550585746, archiveDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/oldWALs, maxLogs=10 2024-12-07T05:49:46,098 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 16b2ce7304d2%2C36221%2C1733550585746.1733550586098 2024-12-07T05:49:46,107 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/WALs/16b2ce7304d2,36221,1733550585746/16b2ce7304d2%2C36221%2C1733550585746.1733550586098 2024-12-07T05:49:46,109 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40363:40363),(127.0.0.1/127.0.0.1:46005:46005),(127.0.0.1/127.0.0.1:40781:40781)] 2024-12-07T05:49:46,110 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T05:49:46,110 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T05:49:46,110 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:46,110 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:46,112 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:46,113 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T05:49:46,114 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:46,114 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:46,114 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:46,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T05:49:46,116 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:46,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T05:49:46,117 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:46,119 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T05:49:46,119 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:46,119 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T05:49:46,120 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:46,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T05:49:46,121 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:46,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T05:49:46,122 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:46,123 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:46,123 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:46,125 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:46,125 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:46,125 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T05:49:46,126 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T05:49:46,129 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T05:49:46,129 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58794415, jitterRate=-0.12389494478702545}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T05:49:46,130 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733550586110Initializing all the Stores at 1733550586111 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550586111Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733550586111Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733550586111Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733550586111Cleaning up temporary data from old regions at 1733550586125 (+14 ms)Region opened successfully at 1733550586130 (+5 ms) 2024-12-07T05:49:46,130 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T05:49:46,135 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@333cabde, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b2ce7304d2/172.17.0.2:0 2024-12-07T05:49:46,136 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T05:49:46,136 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T05:49:46,136 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T05:49:46,136 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T05:49:46,137 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T05:49:46,137 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T05:49:46,137 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T05:49:46,139 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
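The flush-related numbers above are internally consistent: FlushLargeStoresPolicy reports that, with no hbase.hregion.percolumnfamilyflush.size.lower.bound configured, it falls back to the memstore flush size divided by the number of column families, and 134217728 / 4 (info, proc, rs, state) is exactly the 33554432 bytes (32 MB) shown as flushSizeLowerBound. A quick check:

    // Back-of-the-envelope check of the values logged above.
    public class FlushLowerBoundCheck {
        public static void main(String[] args) {
            long memstoreFlushSize = 134_217_728L; // flushSize=134217728 (128 MB) from the log
            int columnFamilies = 4;                // info, proc, rs, state
            long lowerBound = memstoreFlushSize / columnFamilies;
            System.out.println(lowerBound);        // 33554432, i.e. the "32.0 M" / flushSizeLowerBound=33554432 above
        }
    }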
2024-12-07T05:49:46,140 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T05:49:46,153 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T05:49:46,154 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T05:49:46,155 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T05:49:46,162 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T05:49:46,162 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T05:49:46,163 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T05:49:46,170 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T05:49:46,171 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T05:49:46,178 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T05:49:46,180 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T05:49:46,186 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T05:49:46,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:46,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:46,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:46,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-07T05:49:46,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:46,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,196 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=16b2ce7304d2,36221,1733550585746, sessionid=0x101afa04a420000, setting cluster-up flag (Was=false) 2024-12-07T05:49:46,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,237 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T05:49:46,239 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=16b2ce7304d2,36221,1733550585746 2024-12-07T05:49:46,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,278 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T05:49:46,280 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=16b2ce7304d2,36221,1733550585746 2024-12-07T05:49:46,281 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T05:49:46,283 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T05:49:46,283 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T05:49:46,283 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
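The StochasticLoadBalancer record lists the tuning it was loaded with (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000). Those values are normally driven from site configuration; the sketch below writes them back as configuration, with property names quoted from memory as an assumption, so they should be verified against the HBase release in use before being relied on:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: the key names below are my recollection of the stochastic balancer
    // tuning properties and should be checked against the target HBase version.
    // Values are the ones reported in the log record above.
    public class BalancerTuningSketch {
        public static Configuration tuned() {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            return conf;
        }
    }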
2024-12-07T05:49:46,283 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 16b2ce7304d2,36221,1733550585746 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T05:49:46,285 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/16b2ce7304d2:0, corePoolSize=5, maxPoolSize=5 2024-12-07T05:49:46,285 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/16b2ce7304d2:0, corePoolSize=5, maxPoolSize=5 2024-12-07T05:49:46,285 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/16b2ce7304d2:0, corePoolSize=5, maxPoolSize=5 2024-12-07T05:49:46,285 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/16b2ce7304d2:0, corePoolSize=5, maxPoolSize=5 2024-12-07T05:49:46,285 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/16b2ce7304d2:0, corePoolSize=10, maxPoolSize=10 2024-12-07T05:49:46,285 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,285 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/16b2ce7304d2:0, corePoolSize=2, maxPoolSize=2 2024-12-07T05:49:46,285 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,286 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733550616286 2024-12-07T05:49:46,286 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T05:49:46,286 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T05:49:46,286 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T05:49:46,286 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T05:49:46,286 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T05:49:46,286 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T05:49:46,287 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T05:49:46,287 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T05:49:46,288 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,288 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T05:49:46,288 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T05:49:46,288 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T05:49:46,289 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:46,289 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T05:49:46,289 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T05:49:46,289 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T05:49:46,289 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/16b2ce7304d2:0:becomeActiveMaster-HFileCleaner.large.0-1733550586289,5,FailOnTimeoutGroup] 2024-12-07T05:49:46,290 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/16b2ce7304d2:0:becomeActiveMaster-HFileCleaner.small.0-1733550586289,5,FailOnTimeoutGroup] 2024-12-07T05:49:46,290 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,290 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T05:49:46,290 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,290 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741831_1007 (size=1321) 2024-12-07T05:49:46,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741831_1007 (size=1321) 2024-12-07T05:49:46,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741831_1007 (size=1321) 2024-12-07T05:49:46,300 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T05:49:46,300 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8 2024-12-07T05:49:46,310 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741832_1008 (size=32) 2024-12-07T05:49:46,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741832_1008 (size=32) 2024-12-07T05:49:46,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741832_1008 (size=32) 2024-12-07T05:49:46,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T05:49:46,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T05:49:46,315 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T05:49:46,315 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:46,316 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:46,316 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T05:49:46,318 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T05:49:46,318 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
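Every store opened so far logs the same CompactionConfiguration defaults: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0. A deployment that wants different selection behaviour would normally change these through site configuration; a hedged sketch follows (property names written from memory and worth double-checking, values matching the defaults reported above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: key names are quoted from memory; values mirror the
    // CompactionConfiguration output in the records above.
    public class CompactionTuningSketch {
        public static Configuration defaults() {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);         // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);        // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // selection ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            return conf;
        }
    }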
2024-12-07T05:49:46,318 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:46,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T05:49:46,320 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T05:49:46,320 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:46,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:46,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T05:49:46,323 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T05:49:46,323 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:46,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:46,324 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T05:49:46,324 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740 2024-12-07T05:49:46,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740 2024-12-07T05:49:46,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T05:49:46,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T05:49:46,327 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T05:49:46,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T05:49:46,330 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T05:49:46,331 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67356104, jitterRate=0.0036841630935668945}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T05:49:46,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733550586311Initializing all the Stores at 1733550586312 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550586312Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550586313 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733550586313Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550586313Cleaning up temporary data from old regions at 1733550586326 (+13 ms)Region opened successfully at 1733550586331 (+5 ms) 2024-12-07T05:49:46,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
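The two "Opened ..." records also give a cross-check on how the split policy applies jitter: both desiredMaxFileSize values match a 67108864-byte (64 MB) base scaled by (1 + jitterRate), i.e. 64 MB * (1 - 0.12389...) = 58794415 for master:store and 64 MB * (1 + 0.0036841...) = 67356104 for hbase:meta. The 64 MB base is an inference from the numbers, not something the log states explicitly; a quick check:

    // Cross-check of the jittered split sizes logged for master:store and hbase:meta.
    // The 64 MB base (67108864) is inferred from the two records, not read from config here.
    public class SplitJitterCheck {
        public static void main(String[] args) {
            long base = 64L * 1024 * 1024; // 67108864
            double[] jitterRates = { -0.12389494478702545, 0.0036841630935668945 };
            for (double jitter : jitterRates) {
                long desired = Math.round(base * (1.0 + jitter));
                System.out.println(desired); // 58794415 and 67356104, matching the log
            }
        }
    }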
2024-12-07T05:49:46,332 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T05:49:46,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T05:49:46,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T05:49:46,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T05:49:46,332 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T05:49:46,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733550586332Disabling compacts and flushes for region at 1733550586332Disabling writes for close at 1733550586332Writing region close event to WAL at 1733550586332Closed at 1733550586332 2024-12-07T05:49:46,334 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T05:49:46,334 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T05:49:46,334 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T05:49:46,336 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T05:49:46,337 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T05:49:46,382 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(746): ClusterId : 56c1930c-71e8-422f-9b3d-53dc0f52ddb2 2024-12-07T05:49:46,383 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T05:49:46,383 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(746): ClusterId : 56c1930c-71e8-422f-9b3d-53dc0f52ddb2 2024-12-07T05:49:46,383 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.HRegionServer(746): ClusterId : 56c1930c-71e8-422f-9b3d-53dc0f52ddb2 2024-12-07T05:49:46,383 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T05:49:46,383 DEBUG [RS:2;16b2ce7304d2:41867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T05:49:46,404 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T05:49:46,404 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T05:49:46,404 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T05:49:46,405 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 
2024-12-07T05:49:46,405 DEBUG [RS:2;16b2ce7304d2:41867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T05:49:46,405 DEBUG [RS:2;16b2ce7304d2:41867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T05:49:46,421 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T05:49:46,421 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T05:49:46,421 DEBUG [RS:2;16b2ce7304d2:41867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T05:49:46,422 DEBUG [RS:2;16b2ce7304d2:41867 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c413b37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b2ce7304d2/172.17.0.2:0 2024-12-07T05:49:46,422 DEBUG [RS:1;16b2ce7304d2:35779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e01795f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b2ce7304d2/172.17.0.2:0 2024-12-07T05:49:46,422 DEBUG [RS:0;16b2ce7304d2:46815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@377bce4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b2ce7304d2/172.17.0.2:0 2024-12-07T05:49:46,436 DEBUG [RS:0;16b2ce7304d2:46815 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;16b2ce7304d2:46815 2024-12-07T05:49:46,436 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T05:49:46,436 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T05:49:46,437 DEBUG [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-07T05:49:46,437 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b2ce7304d2,36221,1733550585746 with port=46815, startcode=1733550585889 2024-12-07T05:49:46,438 DEBUG [RS:0;16b2ce7304d2:46815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T05:49:46,440 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52867, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T05:49:46,441 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36221 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b2ce7304d2,46815,1733550585889 2024-12-07T05:49:46,441 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36221 {}] master.ServerManager(517): Registering regionserver=16b2ce7304d2,46815,1733550585889 2024-12-07T05:49:46,441 DEBUG [RS:1;16b2ce7304d2:35779 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;16b2ce7304d2:35779 2024-12-07T05:49:46,441 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T05:49:46,441 DEBUG [RS:2;16b2ce7304d2:41867 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;16b2ce7304d2:41867 2024-12-07T05:49:46,441 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T05:49:46,441 DEBUG [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T05:49:46,441 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T05:49:46,442 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T05:49:46,442 DEBUG [RS:2;16b2ce7304d2:41867 {}] regionserver.HRegionServer(832): About to register with Master. 
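reportForDuty registers each region server with the master under a hostname,port,startcode triple; the same triple reappears later in the log as the WAL directory name, the ephemeral znode under /hbase/rs, and the prefix of per-server chores. A tiny, purely illustrative decomposition of the first registration above, using plain string handling rather than any HBase API:

```java
public class ServerNameParts {
    public static void main(String[] args) {
        // Registration string as logged by ServerManager; parsed here for illustration only.
        String registered = "16b2ce7304d2,46815,1733550585889";
        String[] p = registered.split(",");
        String host = p[0];                     // 16b2ce7304d2
        int port = Integer.parseInt(p[1]);      // 46815
        long startcode = Long.parseLong(p[2]);  // 1733550585889, the RS start time in epoch millis
        System.out.println(host + ":" + port + " startcode=" + startcode);
    }
}
```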
2024-12-07T05:49:46,442 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b2ce7304d2,36221,1733550585746 with port=35779, startcode=1733550585922 2024-12-07T05:49:46,442 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b2ce7304d2,36221,1733550585746 with port=41867, startcode=1733550585957 2024-12-07T05:49:46,442 DEBUG [RS:1;16b2ce7304d2:35779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T05:49:46,442 DEBUG [RS:2;16b2ce7304d2:41867 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T05:49:46,443 DEBUG [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8 2024-12-07T05:49:46,443 DEBUG [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42677 2024-12-07T05:49:46,444 DEBUG [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T05:49:46,445 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51543, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T05:49:46,445 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42833, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T05:49:46,445 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36221 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b2ce7304d2,41867,1733550585957 2024-12-07T05:49:46,445 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36221 {}] master.ServerManager(517): Registering regionserver=16b2ce7304d2,41867,1733550585957 2024-12-07T05:49:46,447 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36221 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b2ce7304d2,35779,1733550585922 2024-12-07T05:49:46,447 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36221 {}] master.ServerManager(517): Registering regionserver=16b2ce7304d2,35779,1733550585922 2024-12-07T05:49:46,448 DEBUG [RS:2;16b2ce7304d2:41867 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8 2024-12-07T05:49:46,448 DEBUG [RS:2;16b2ce7304d2:41867 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42677 2024-12-07T05:49:46,448 DEBUG [RS:2;16b2ce7304d2:41867 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T05:49:46,450 DEBUG [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8 2024-12-07T05:49:46,450 DEBUG [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42677 2024-12-07T05:49:46,450 DEBUG [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T05:49:46,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T05:49:46,486 DEBUG [RS:0;16b2ce7304d2:46815 {}] zookeeper.ZKUtil(111): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b2ce7304d2,46815,1733550585889 2024-12-07T05:49:46,486 WARN [RS:0;16b2ce7304d2:46815 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T05:49:46,486 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b2ce7304d2,46815,1733550585889] 2024-12-07T05:49:46,486 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b2ce7304d2,35779,1733550585922] 2024-12-07T05:49:46,486 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b2ce7304d2,41867,1733550585957] 2024-12-07T05:49:46,486 INFO [RS:0;16b2ce7304d2:46815 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T05:49:46,486 DEBUG [RS:1;16b2ce7304d2:35779 {}] zookeeper.ZKUtil(111): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b2ce7304d2,35779,1733550585922 2024-12-07T05:49:46,486 DEBUG [RS:2;16b2ce7304d2:41867 {}] zookeeper.ZKUtil(111): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b2ce7304d2,41867,1733550585957 2024-12-07T05:49:46,486 WARN [RS:1;16b2ce7304d2:35779 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T05:49:46,487 WARN [RS:2;16b2ce7304d2:41867 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T05:49:46,487 INFO [RS:1;16b2ce7304d2:35779 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T05:49:46,487 DEBUG [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/WALs/16b2ce7304d2,46815,1733550585889 2024-12-07T05:49:46,487 INFO [RS:2;16b2ce7304d2:41867 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T05:49:46,487 DEBUG [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/WALs/16b2ce7304d2,35779,1733550585922 2024-12-07T05:49:46,487 DEBUG [RS:2;16b2ce7304d2:41867 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/WALs/16b2ce7304d2,41867,1733550585957 2024-12-07T05:49:46,487 WARN [16b2ce7304d2:36221 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-07T05:49:46,491 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T05:49:46,491 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T05:49:46,491 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T05:49:46,494 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T05:49:46,495 INFO [RS:1;16b2ce7304d2:35779 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T05:49:46,495 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,495 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T05:49:46,495 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T05:49:46,496 INFO [RS:2;16b2ce7304d2:41867 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T05:49:46,496 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,496 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T05:49:46,496 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T05:49:46,496 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
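The MemStoreFlusher numbers above fit the usual global memstore accounting: the low-water mark is 95% of the global limit (836 M = 0.95 x 880 M), and 880 M itself would correspond to roughly a 2.2 GB heap at the common 0.4 global memstore fraction. A small sketch of that arithmetic, with the fractions and the inferred heap size stated as assumptions rather than values read from this test's configuration:

```java
public class MemstoreLimitMath {
    public static void main(String[] args) {
        // Assumed fractions (common defaults) and a heap size inferred from the logged 880 M.
        double globalFraction = 0.4;
        double lowerFraction = 0.95;
        long heapMb = 2200;
        long limitMb = Math.round(heapMb * globalFraction);    // 880, as logged
        long lowMarkMb = Math.round(limitMb * lowerFraction);  // 836, as logged
        System.out.println("globalMemStoreLimit=" + limitMb + " M, lowMark=" + lowMarkMb + " M");
    }
}
```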
2024-12-07T05:49:46,496 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,496 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,497 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,497 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,497 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,497 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b2ce7304d2:0, corePoolSize=2, maxPoolSize=2 2024-12-07T05:49:46,497 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,497 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,497 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,497 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T05:49:46,497 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,497 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T05:49:46,497 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,497 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-07T05:49:46,497 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,497 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b2ce7304d2:0, corePoolSize=3, maxPoolSize=3 2024-12-07T05:49:46,497 INFO [RS:0;16b2ce7304d2:46815 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T05:49:46,497 DEBUG [RS:1;16b2ce7304d2:35779 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0, corePoolSize=3, maxPoolSize=3 2024-12-07T05:49:46,497 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,497 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,498 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,498 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,498 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T05:49:46,498 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,498 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,498 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,498 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,498 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b2ce7304d2:0, corePoolSize=2, maxPoolSize=2 2024-12-07T05:49:46,498 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,498 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,498 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,498 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-12-07T05:49:46,498 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,498 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,35779,1733550585922-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T05:49:46,498 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,498 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,498 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,499 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T05:49:46,499 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,499 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b2ce7304d2:0, corePoolSize=3, maxPoolSize=3 2024-12-07T05:49:46,499 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,499 DEBUG [RS:2;16b2ce7304d2:41867 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0, corePoolSize=3, maxPoolSize=3 2024-12-07T05:49:46,499 DEBUG [RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,499 DEBUG [RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,499 DEBUG [RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,499 DEBUG [RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,499 DEBUG [RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,499 DEBUG [RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b2ce7304d2:0, corePoolSize=2, maxPoolSize=2 2024-12-07T05:49:46,499 DEBUG [RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,500 DEBUG [RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,500 DEBUG 
[RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,500 DEBUG [RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,500 DEBUG [RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,500 DEBUG [RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b2ce7304d2:0, corePoolSize=1, maxPoolSize=1 2024-12-07T05:49:46,500 DEBUG [RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b2ce7304d2:0, corePoolSize=3, maxPoolSize=3 2024-12-07T05:49:46,500 DEBUG [RS:0;16b2ce7304d2:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0, corePoolSize=3, maxPoolSize=3 2024-12-07T05:49:46,504 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,504 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,504 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,504 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,504 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,504 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,504 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,504 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,504 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,504 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,504 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,46815,1733550585889-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T05:49:46,504 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,41867,1733550585957-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
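Each region server starts a set of named executors with the core/max pool sizes printed above: single-threaded open/close pools, two log-replay workers, and three snapshot and flush workers. The sketch below only mirrors that shape with plain java.util.concurrent fixed pools; it is not HBase's own executor.ExecutorService implementation.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class RsExecutorSketch {
    public static void main(String[] args) {
        // Pool sizes as logged per region server; illustration of the shape only.
        Map<String, Integer> pools = Map.of(
                "RS_OPEN_REGION", 1,
                "RS_OPEN_META", 1,
                "RS_LOG_REPLAY_OPS", 2,
                "RS_SNAPSHOT_OPERATIONS", 3,
                "RS_FLUSH_OPERATIONS", 3);
        Map<String, ExecutorService> executors = new HashMap<>();
        pools.forEach((name, size) -> executors.put(name,
                Executors.newFixedThreadPool(size, r -> new Thread(r, name + "-worker"))));
        executors.get("RS_OPEN_REGION").submit(() -> System.out.println("open-region task"));
        executors.values().forEach(ExecutorService::shutdown);
    }
}
```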
2024-12-07T05:49:46,511 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T05:49:46,512 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,35779,1733550585922-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,512 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,512 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.Replication(171): 16b2ce7304d2,35779,1733550585922 started 2024-12-07T05:49:46,519 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T05:49:46,519 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,41867,1733550585957-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,520 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,520 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.Replication(171): 16b2ce7304d2,41867,1733550585957 started 2024-12-07T05:49:46,521 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T05:49:46,521 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,46815,1733550585889-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,522 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,522 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.Replication(171): 16b2ce7304d2,46815,1733550585889 started 2024-12-07T05:49:46,524 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T05:49:46,524 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(1482): Serving as 16b2ce7304d2,35779,1733550585922, RpcServer on 16b2ce7304d2/172.17.0.2:35779, sessionid=0x101afa04a420002 2024-12-07T05:49:46,524 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T05:49:46,524 DEBUG [RS:1;16b2ce7304d2:35779 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b2ce7304d2,35779,1733550585922 2024-12-07T05:49:46,525 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b2ce7304d2,35779,1733550585922' 2024-12-07T05:49:46,525 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T05:49:46,525 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T05:49:46,526 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T05:49:46,526 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T05:49:46,526 DEBUG [RS:1;16b2ce7304d2:35779 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b2ce7304d2,35779,1733550585922 2024-12-07T05:49:46,526 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b2ce7304d2,35779,1733550585922' 2024-12-07T05:49:46,526 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T05:49:46,526 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T05:49:46,527 DEBUG [RS:1;16b2ce7304d2:35779 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T05:49:46,527 INFO [RS:1;16b2ce7304d2:35779 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T05:49:46,527 INFO [RS:1;16b2ce7304d2:35779 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T05:49:46,532 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
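The flush-table-proc and online-snapshot managers coordinate through ZooKeeper: each member checks an abort znode and watches an acquired znode for new procedures, using the paths printed above. A minimal outside illustration of that watch pattern with the raw ZooKeeper client, pointed at the quorum from the log; this is not the ZKProcedureMemberRpcs code itself.

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureMemberWatchSketch {
    public static void main(String[] args) throws Exception {
        // Quorum and znode paths are taken from the log above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64227", 30_000,
                event -> System.out.println("event: " + event.getType() + " on " + event.getPath()));
        List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", true); // new procedures
        List<String> aborted = zk.getChildren("/hbase/flush-table-proc/abort", true);     // abort markers
        System.out.println("acquired=" + acquired + " aborted=" + aborted);
        zk.close();
    }
}
```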
2024-12-07T05:49:46,532 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.HRegionServer(1482): Serving as 16b2ce7304d2,41867,1733550585957, RpcServer on 16b2ce7304d2/172.17.0.2:41867, sessionid=0x101afa04a420003 2024-12-07T05:49:46,532 DEBUG [RS:2;16b2ce7304d2:41867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T05:49:46,533 DEBUG [RS:2;16b2ce7304d2:41867 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b2ce7304d2,41867,1733550585957 2024-12-07T05:49:46,533 DEBUG [RS:2;16b2ce7304d2:41867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b2ce7304d2,41867,1733550585957' 2024-12-07T05:49:46,533 DEBUG [RS:2;16b2ce7304d2:41867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T05:49:46,533 DEBUG [RS:2;16b2ce7304d2:41867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T05:49:46,534 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:46,534 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(1482): Serving as 16b2ce7304d2,46815,1733550585889, RpcServer on 16b2ce7304d2/172.17.0.2:46815, sessionid=0x101afa04a420001 2024-12-07T05:49:46,534 DEBUG [RS:2;16b2ce7304d2:41867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T05:49:46,534 DEBUG [RS:2;16b2ce7304d2:41867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T05:49:46,534 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T05:49:46,534 DEBUG [RS:0;16b2ce7304d2:46815 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b2ce7304d2,46815,1733550585889 2024-12-07T05:49:46,534 DEBUG [RS:2;16b2ce7304d2:41867 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b2ce7304d2,41867,1733550585957 2024-12-07T05:49:46,534 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b2ce7304d2,46815,1733550585889' 2024-12-07T05:49:46,534 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T05:49:46,534 DEBUG [RS:2;16b2ce7304d2:41867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b2ce7304d2,41867,1733550585957' 2024-12-07T05:49:46,534 DEBUG [RS:2;16b2ce7304d2:41867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T05:49:46,534 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T05:49:46,534 DEBUG [RS:2;16b2ce7304d2:41867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T05:49:46,535 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T05:49:46,535 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T05:49:46,535 DEBUG [RS:2;16b2ce7304d2:41867 {}] 
procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T05:49:46,535 DEBUG [RS:0;16b2ce7304d2:46815 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b2ce7304d2,46815,1733550585889 2024-12-07T05:49:46,535 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b2ce7304d2,46815,1733550585889' 2024-12-07T05:49:46,535 INFO [RS:2;16b2ce7304d2:41867 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T05:49:46,535 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T05:49:46,535 INFO [RS:2;16b2ce7304d2:41867 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T05:49:46,535 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T05:49:46,536 DEBUG [RS:0;16b2ce7304d2:46815 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T05:49:46,536 INFO [RS:0;16b2ce7304d2:46815 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T05:49:46,536 INFO [RS:0;16b2ce7304d2:46815 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T05:49:46,632 INFO [RS:1;16b2ce7304d2:35779 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b2ce7304d2%2C35779%2C1733550585922, suffix=, logDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/WALs/16b2ce7304d2,35779,1733550585922, archiveDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/oldWALs, maxLogs=32 2024-12-07T05:49:46,637 INFO [RS:1;16b2ce7304d2:35779 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 16b2ce7304d2%2C35779%2C1733550585922.1733550586636 2024-12-07T05:49:46,639 INFO [RS:0;16b2ce7304d2:46815 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b2ce7304d2%2C46815%2C1733550585889, suffix=, logDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/WALs/16b2ce7304d2,46815,1733550585889, archiveDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/oldWALs, maxLogs=32 2024-12-07T05:49:46,639 INFO [RS:2;16b2ce7304d2:41867 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b2ce7304d2%2C41867%2C1733550585957, suffix=, logDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/WALs/16b2ce7304d2,41867,1733550585957, archiveDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/oldWALs, maxLogs=32 2024-12-07T05:49:46,641 INFO [RS:0;16b2ce7304d2:46815 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 16b2ce7304d2%2C46815%2C1733550585889.1733550586641 2024-12-07T05:49:46,641 INFO [RS:2;16b2ce7304d2:41867 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 16b2ce7304d2%2C41867%2C1733550585957.1733550586641 2024-12-07T05:49:46,648 INFO [RS:1;16b2ce7304d2:35779 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/WALs/16b2ce7304d2,35779,1733550585922/16b2ce7304d2%2C35779%2C1733550585922.1733550586636 2024-12-07T05:49:46,651 DEBUG [RS:1;16b2ce7304d2:35779 {}] wal.AbstractFSWAL(1109): 
Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46005:46005),(127.0.0.1/127.0.0.1:40781:40781),(127.0.0.1/127.0.0.1:40363:40363)] 2024-12-07T05:49:46,651 INFO [RS:2;16b2ce7304d2:41867 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/WALs/16b2ce7304d2,41867,1733550585957/16b2ce7304d2%2C41867%2C1733550585957.1733550586641 2024-12-07T05:49:46,651 INFO [RS:0;16b2ce7304d2:46815 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/WALs/16b2ce7304d2,46815,1733550585889/16b2ce7304d2%2C46815%2C1733550585889.1733550586641 2024-12-07T05:49:46,652 DEBUG [RS:2;16b2ce7304d2:41867 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40363:40363),(127.0.0.1/127.0.0.1:40781:40781),(127.0.0.1/127.0.0.1:46005:46005)] 2024-12-07T05:49:46,652 DEBUG [RS:0;16b2ce7304d2:46815 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40781:40781),(127.0.0.1/127.0.0.1:46005:46005),(127.0.0.1/127.0.0.1:40363:40363)] 2024-12-07T05:49:46,738 DEBUG [16b2ce7304d2:36221 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-07T05:49:46,739 DEBUG [16b2ce7304d2:36221 {}] balancer.BalancerClusterState(204): Hosts are {16b2ce7304d2=0} racks are {/default-rack=0} 2024-12-07T05:49:46,745 DEBUG [16b2ce7304d2:36221 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T05:49:46,745 DEBUG [16b2ce7304d2:36221 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T05:49:46,745 DEBUG [16b2ce7304d2:36221 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T05:49:46,745 DEBUG [16b2ce7304d2:36221 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T05:49:46,745 DEBUG [16b2ce7304d2:36221 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T05:49:46,745 DEBUG [16b2ce7304d2:36221 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T05:49:46,745 INFO [16b2ce7304d2:36221 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T05:49:46,745 INFO [16b2ce7304d2:36221 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T05:49:46,745 INFO [16b2ce7304d2:36221 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T05:49:46,746 DEBUG [16b2ce7304d2:36221 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T05:49:46,747 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=16b2ce7304d2,35779,1733550585922 2024-12-07T05:49:46,751 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 16b2ce7304d2,35779,1733550585922, state=OPENING 2024-12-07T05:49:46,786 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T05:49:46,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
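The three WALs created above live under WALs/<server> and are named with the URL-encoded server name plus a creation timestamp; the 128 MB rollsize is half the 256 MB block size, consistent with the usual 0.5 log-roll multiplier. Decoding one of the logged WAL names makes the naming convention visible; the assumption here is that the prefix is nothing more than percent-encoding of the comma-separated server name.

```java
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;

public class WalNameDecode {
    public static void main(String[] args) {
        // WAL file name as logged for RS:1; only %2C (a comma) gets decoded.
        String wal = "16b2ce7304d2%2C35779%2C1733550585922.1733550586636";
        System.out.println(URLDecoder.decode(wal, StandardCharsets.UTF_8));
        // prints: 16b2ce7304d2,35779,1733550585922.1733550586636
    }
}
```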
2024-12-07T05:49:46,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:46,797 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T05:49:46,797 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=16b2ce7304d2,35779,1733550585922}] 2024-12-07T05:49:46,797 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:46,797 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:46,798 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:46,798 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:46,954 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T05:49:46,957 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48041, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T05:49:46,964 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T05:49:46,965 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T05:49:46,969 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b2ce7304d2%2C35779%2C1733550585922.meta, suffix=.meta, logDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/WALs/16b2ce7304d2,35779,1733550585922, archiveDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/oldWALs, maxLogs=32 2024-12-07T05:49:46,970 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 16b2ce7304d2%2C35779%2C1733550585922.meta.1733550586970.meta 2024-12-07T05:49:46,977 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/WALs/16b2ce7304d2,35779,1733550585922/16b2ce7304d2%2C35779%2C1733550585922.meta.1733550586970.meta 2024-12-07T05:49:46,981 DEBUG 
[RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40363:40363),(127.0.0.1/127.0.0.1:40781:40781),(127.0.0.1/127.0.0.1:46005:46005)] 2024-12-07T05:49:46,982 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T05:49:46,982 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T05:49:46,982 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T05:49:46,983 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-07T05:49:46,983 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T05:49:46,983 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T05:49:46,983 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T05:49:46,983 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T05:49:46,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T05:49:46,986 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T05:49:46,986 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:46,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:46,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T05:49:46,987 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T05:49:46,987 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:46,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:46,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T05:49:46,989 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T05:49:46,989 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:46,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:46,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 
1588230740 2024-12-07T05:49:46,990 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T05:49:46,990 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:46,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T05:49:46,991 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T05:49:46,992 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740 2024-12-07T05:49:46,993 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740 2024-12-07T05:49:46,995 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T05:49:46,995 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T05:49:46,996 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
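The CompactionConfiguration lines above also hang together numerically: minCompactSize equals the assumed 128 MB memstore flush size, and the throttle point of 2684354560 bytes equals 2 x maxFilesToCompact x that flush size. A small check of that relationship, with the inputs treated as assumptions read back from the log rather than from the test's configuration:

```java
public class CompactionThrottleMath {
    public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // also shows up above as minCompactSize: 128 MB
        int maxFilesToCompact = 10;                  // files [minFilesToCompact:3, maxFilesToCompact:10)
        long throttlePoint = 2L * maxFilesToCompact * memstoreFlushSize;
        System.out.println(throttlePoint);           // 2684354560, the logged throttle point
    }
}
```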
2024-12-07T05:49:46,997 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T05:49:46,998 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68615380, jitterRate=0.022448837757110596}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T05:49:46,998 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T05:49:47,000 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733550586983Writing region info on filesystem at 1733550586983Initializing all the Stores at 1733550586984 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550586984Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550586984Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733550586984Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733550586984Cleaning up temporary data from old regions at 1733550586995 (+11 ms)Running coprocessor post-open hooks at 1733550586998 (+3 ms)Region opened successfully at 1733550586999 (+1 ms) 2024-12-07T05:49:47,001 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733550586953 2024-12-07T05:49:47,004 DEBUG [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T05:49:47,005 INFO [RS_OPEN_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T05:49:47,005 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=16b2ce7304d2,35779,1733550585922 2024-12-07T05:49:47,007 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 16b2ce7304d2,35779,1733550585922, state=OPEN 2024-12-07T05:49:47,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T05:49:47,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T05:49:47,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T05:49:47,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T05:49:47,028 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=16b2ce7304d2,35779,1733550585922 2024-12-07T05:49:47,028 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:47,028 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:47,028 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:47,028 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T05:49:47,033 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T05:49:47,034 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=16b2ce7304d2,35779,1733550585922 in 231 msec 2024-12-07T05:49:47,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T05:49:47,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 700 msec 2024-12-07T05:49:47,041 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T05:49:47,041 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T05:49:47,043 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T05:49:47,043 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=16b2ce7304d2,35779,1733550585922, seqNum=-1] 2024-12-07T05:49:47,043 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T05:49:47,045 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56681, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T05:49:47,053 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 769 msec 2024-12-07T05:49:47,054 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733550587053, completionTime=-1 2024-12-07T05:49:47,054 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-07T05:49:47,054 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T05:49:47,056 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-07T05:49:47,056 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733550647056 2024-12-07T05:49:47,056 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733550707056 2024-12-07T05:49:47,056 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-07T05:49:47,057 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,36221,1733550585746-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:47,057 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,36221,1733550585746-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:47,057 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,36221,1733550585746-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:47,057 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-16b2ce7304d2:36221, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:47,057 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T05:49:47,057 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
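The "fetched meta region location" lines above are the client-side registry resolving hbase:meta,,1.1588230740 to its hosting region server. A minimal client-side sketch of the same lookup through the public RegionLocator API follows; it assumes a reachable cluster configuration and is an illustration, not code from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class MetaLocationLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Resolves the meta region to its server, mirroring the debug lines above.
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println(loc.getServerName() + " seqNum=" + loc.getSeqNum());
    }
  }
}
```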
2024-12-07T05:49:47,060 DEBUG [master/16b2ce7304d2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T05:49:47,063 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.051sec 2024-12-07T05:49:47,063 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T05:49:47,063 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T05:49:47,063 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T05:49:47,063 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T05:49:47,063 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T05:49:47,063 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,36221,1733550585746-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T05:49:47,063 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,36221,1733550585746-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T05:49:47,066 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T05:49:47,066 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T05:49:47,066 INFO [master/16b2ce7304d2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b2ce7304d2,36221,1733550585746-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
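Once the master logs "has completed initialization" and all three region servers have reported in, the same state is visible to clients through the Admin API. A small hedged sketch, assuming an Admin handle obtained elsewhere; the method name printStatus is illustrative.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.client.Admin;

public final class ClusterCheckSketch {
  // Prints the active master and the live region server count,
  // matching the "Finished waiting on RegionServer count=3" line above.
  static void printStatus(Admin admin) throws IOException {
    ClusterMetrics metrics = admin.getClusterMetrics();
    System.out.println("active master: " + metrics.getMasterName());
    System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
  }
}
```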
2024-12-07T05:49:47,083 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@502349d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T05:49:47,083 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 16b2ce7304d2,36221,-1 for getting cluster id 2024-12-07T05:49:47,083 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T05:49:47,084 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '56c1930c-71e8-422f-9b3d-53dc0f52ddb2' 2024-12-07T05:49:47,084 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T05:49:47,085 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "56c1930c-71e8-422f-9b3d-53dc0f52ddb2" 2024-12-07T05:49:47,085 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3af9bd8b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T05:49:47,085 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [16b2ce7304d2,36221,-1] 2024-12-07T05:49:47,085 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T05:49:47,086 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:47,087 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44952, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T05:49:47,088 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a80bb54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T05:49:47,089 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T05:49:47,090 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=16b2ce7304d2,35779,1733550585922, seqNum=-1] 2024-12-07T05:49:47,091 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T05:49:47,093 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41134, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T05:49:47,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=16b2ce7304d2,36221,1733550585746 2024-12-07T05:49:47,097 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T05:49:47,098 DEBUG 
[RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 16b2ce7304d2,36221,1733550585746 2024-12-07T05:49:47,098 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@59bee100 2024-12-07T05:49:47,098 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T05:49:47,100 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44954, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T05:49:47,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36221 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T05:49:47,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36221 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-07T05:49:47,105 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T05:49:47,105 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:47,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36221 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-07T05:49:47,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T05:49:47,107 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T05:49:47,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741837_1013 (size=392) 2024-12-07T05:49:47,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741837_1013 (size=392) 2024-12-07T05:49:47,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741837_1013 (size=392) 2024-12-07T05:49:47,118 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d2ed1836964bedf9bbae1b9174957684, NAME => 'TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8 2024-12-07T05:49:47,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741838_1014 (size=51) 2024-12-07T05:49:47,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741838_1014 (size=51) 2024-12-07T05:49:47,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741838_1014 (size=51) 2024-12-07T05:49:47,128 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T05:49:47,129 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing d2ed1836964bedf9bbae1b9174957684, disabling compactions & flushes 2024-12-07T05:49:47,129 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 2024-12-07T05:49:47,129 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 2024-12-07T05:49:47,129 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. after waiting 0 ms 2024-12-07T05:49:47,129 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 2024-12-07T05:49:47,129 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 2024-12-07T05:49:47,129 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for d2ed1836964bedf9bbae1b9174957684: Waiting for close lock at 1733550587128Disabling compacts and flushes for region at 1733550587128Disabling writes for close at 1733550587129 (+1 ms)Writing region close event to WAL at 1733550587129Closed at 1733550587129 2024-12-07T05:49:47,131 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T05:49:47,132 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733550587131"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733550587131"}]},"ts":"1733550587131"} 2024-12-07T05:49:47,135 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
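The create request logged above ('TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single 'cf' family with VERSIONS => '1') corresponds to an ordinary Admin.createTable call. The sketch below reconstructs the equivalent client code from the logged descriptor; it assumes an open Connection and is not the test's actual source.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTestTableSketch {
  static void create(Connection conn) throws IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
        .setRegionReplication(1)                       // TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                         // VERSIONS => '1'
            .build())
        .build();
    try (Admin admin = conn.getAdmin()) {
      // Drives the CreateTableProcedure (pid=4) seen above: write the FS layout,
      // add the region to hbase:meta, then assign it to a region server.
      admin.createTable(desc);
    }
  }
}
```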
2024-12-07T05:49:47,137 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T05:49:47,138 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733550587137"}]},"ts":"1733550587137"} 2024-12-07T05:49:47,141 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-07T05:49:47,141 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {16b2ce7304d2=0} racks are {/default-rack=0} 2024-12-07T05:49:47,142 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T05:49:47,142 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T05:49:47,142 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T05:49:47,142 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T05:49:47,142 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T05:49:47,142 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T05:49:47,142 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T05:49:47,142 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T05:49:47,142 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T05:49:47,142 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T05:49:47,142 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d2ed1836964bedf9bbae1b9174957684, ASSIGN}] 2024-12-07T05:49:47,145 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d2ed1836964bedf9bbae1b9174957684, ASSIGN 2024-12-07T05:49:47,146 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d2ed1836964bedf9bbae1b9174957684, ASSIGN; state=OFFLINE, location=16b2ce7304d2,46815,1733550585889; forceNewPlan=false, retain=false 2024-12-07T05:49:47,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T05:49:47,297 INFO [16b2ce7304d2:36221 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-07T05:49:47,297 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d2ed1836964bedf9bbae1b9174957684, regionState=OPENING, regionLocation=16b2ce7304d2,46815,1733550585889 2024-12-07T05:49:47,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d2ed1836964bedf9bbae1b9174957684, ASSIGN because future has completed 2024-12-07T05:49:47,302 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d2ed1836964bedf9bbae1b9174957684, server=16b2ce7304d2,46815,1733550585889}] 2024-12-07T05:49:47,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T05:49:47,456 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T05:49:47,457 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38989, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T05:49:47,462 INFO [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 2024-12-07T05:49:47,463 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d2ed1836964bedf9bbae1b9174957684, NAME => 'TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684.', STARTKEY => '', ENDKEY => ''} 2024-12-07T05:49:47,463 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC d2ed1836964bedf9bbae1b9174957684 2024-12-07T05:49:47,463 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T05:49:47,463 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d2ed1836964bedf9bbae1b9174957684 2024-12-07T05:49:47,463 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d2ed1836964bedf9bbae1b9174957684 2024-12-07T05:49:47,466 INFO [StoreOpener-d2ed1836964bedf9bbae1b9174957684-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d2ed1836964bedf9bbae1b9174957684 2024-12-07T05:49:47,468 INFO [StoreOpener-d2ed1836964bedf9bbae1b9174957684-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2ed1836964bedf9bbae1b9174957684 columnFamilyName cf 2024-12-07T05:49:47,468 DEBUG [StoreOpener-d2ed1836964bedf9bbae1b9174957684-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T05:49:47,469 INFO [StoreOpener-d2ed1836964bedf9bbae1b9174957684-1 {}] regionserver.HStore(327): Store=d2ed1836964bedf9bbae1b9174957684/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T05:49:47,469 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d2ed1836964bedf9bbae1b9174957684 2024-12-07T05:49:47,470 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/default/TestHBaseWalOnEC/d2ed1836964bedf9bbae1b9174957684 2024-12-07T05:49:47,471 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/default/TestHBaseWalOnEC/d2ed1836964bedf9bbae1b9174957684 2024-12-07T05:49:47,471 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d2ed1836964bedf9bbae1b9174957684 2024-12-07T05:49:47,471 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d2ed1836964bedf9bbae1b9174957684 2024-12-07T05:49:47,473 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d2ed1836964bedf9bbae1b9174957684 2024-12-07T05:49:47,476 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/default/TestHBaseWalOnEC/d2ed1836964bedf9bbae1b9174957684/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T05:49:47,476 INFO [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d2ed1836964bedf9bbae1b9174957684; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63221967, jitterRate=-0.057919278740882874}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T05:49:47,476 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d2ed1836964bedf9bbae1b9174957684 2024-12-07T05:49:47,477 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d2ed1836964bedf9bbae1b9174957684: Running coprocessor pre-open hook at 1733550587464Writing region info on filesystem at 1733550587464Initializing all the Stores at 1733550587465 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733550587465Cleaning up temporary data from old regions at 1733550587471 (+6 ms)Running coprocessor post-open hooks at 1733550587476 (+5 ms)Region opened successfully at 1733550587477 (+1 ms) 2024-12-07T05:49:47,478 INFO [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684., pid=6, masterSystemTime=1733550587455 2024-12-07T05:49:47,483 DEBUG [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 2024-12-07T05:49:47,483 INFO [RS_OPEN_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 2024-12-07T05:49:47,483 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d2ed1836964bedf9bbae1b9174957684, regionState=OPEN, openSeqNum=2, regionLocation=16b2ce7304d2,46815,1733550585889 2024-12-07T05:49:47,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d2ed1836964bedf9bbae1b9174957684, server=16b2ce7304d2,46815,1733550585889 because future has completed 2024-12-07T05:49:47,493 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T05:49:47,494 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d2ed1836964bedf9bbae1b9174957684, server=16b2ce7304d2,46815,1733550585889 in 188 msec 2024-12-07T05:49:47,497 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T05:49:47,497 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=d2ed1836964bedf9bbae1b9174957684, ASSIGN in 351 msec 2024-12-07T05:49:47,499 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T05:49:47,499 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733550587499"}]},"ts":"1733550587499"} 2024-12-07T05:49:47,503 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-07T05:49:47,504 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T05:49:47,508 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 404 msec 2024-12-07T05:49:47,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T05:49:47,736 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T05:49:47,736 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-07T05:49:47,737 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T05:49:47,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-07T05:49:47,741 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T05:49:47,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-07T05:49:47,746 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684., hostname=16b2ce7304d2,46815,1733550585889, seqNum=2] 2024-12-07T05:49:47,747 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T05:49:47,750 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35310, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T05:49:47,755 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36221 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-07T05:49:47,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36221 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-07T05:49:47,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T05:49:47,758 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-07T05:49:47,759 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T05:49:47,759 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T05:49:47,865 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T05:49:47,893 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T05:49:47,893 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T05:49:47,896 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T05:49:47,896 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T05:49:47,897 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T05:49:47,897 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-07T05:49:47,899 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC 2024-12-07T05:49:47,899 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC Metrics about Tables on a single HBase RegionServer 2024-12-07T05:49:47,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-07T05:49:47,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 
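The "Waiting until all regions of table TestHBaseWalOnEC get assigned" lines a little earlier are HBaseTestingUtil's assignment barrier. A hedged sketch of how a test typically invokes it is below; the helper and parameter names are my assumption of the usual testing-utility API rather than a quote from this test.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;

public final class AssignmentBarrierSketch {
  // Blocks (up to the utility's timeout, 60000 ms in the log above)
  // until every region of the table is OPEN on some region server.
  static void awaitAssigned(HBaseTestingUtil util) throws IOException {
    util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"));
  }
}
```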
2024-12-07T05:49:47,914 INFO [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing d2ed1836964bedf9bbae1b9174957684 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-07T05:49:47,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/default/TestHBaseWalOnEC/d2ed1836964bedf9bbae1b9174957684/.tmp/cf/9b4e7f15c8a74abb97bfc0beae00c555 is 36, key is row/cf:cq/1733550587751/Put/seqid=0 2024-12-07T05:49:47,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741839_1015 (size=4787) 2024-12-07T05:49:47,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741839_1015 (size=4787) 2024-12-07T05:49:47,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741839_1015 (size=4787) 2024-12-07T05:49:47,938 INFO [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/default/TestHBaseWalOnEC/d2ed1836964bedf9bbae1b9174957684/.tmp/cf/9b4e7f15c8a74abb97bfc0beae00c555 2024-12-07T05:49:47,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/default/TestHBaseWalOnEC/d2ed1836964bedf9bbae1b9174957684/.tmp/cf/9b4e7f15c8a74abb97bfc0beae00c555 as hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/default/TestHBaseWalOnEC/d2ed1836964bedf9bbae1b9174957684/cf/9b4e7f15c8a74abb97bfc0beae00c555 2024-12-07T05:49:47,957 INFO [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/default/TestHBaseWalOnEC/d2ed1836964bedf9bbae1b9174957684/cf/9b4e7f15c8a74abb97bfc0beae00c555, entries=1, sequenceid=5, filesize=4.7 K 2024-12-07T05:49:47,958 INFO [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for d2ed1836964bedf9bbae1b9174957684 in 44ms, sequenceid=5, compaction requested=false 2024-12-07T05:49:47,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for d2ed1836964bedf9bbae1b9174957684: 2024-12-07T05:49:47,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 
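The flush above persists a single 32-byte cell (key row/cf:cq) into store file 9b4e7f15c8a74abb97bfc0beae00c555. A sketch of the client calls that would produce it: one Put followed by an admin-triggered flush, which starts the FlushTableProcedure (pid=7) seen earlier. It assumes an open Connection, and the cell value is illustrative.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutAndFlushSketch {
  static void run(Connection conn) throws IOException {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
      // Writes the row/cf:cq cell seen in the HFile key above; the edit first lands
      // in the WAL and the memstore of region d2ed1836964bedf9bbae1b9174957684.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Forces the memstore to disk, producing the ~4.7 K store file logged above.
      admin.flush(name);
    }
  }
}
```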
2024-12-07T05:49:47,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b2ce7304d2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-07T05:49:47,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36221 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-07T05:49:47,964 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-07T05:49:47,964 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 202 msec 2024-12-07T05:49:47,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 210 msec 2024-12-07T05:49:48,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T05:49:48,076 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T05:49:48,082 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T05:49:48,083 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T05:49:48,083 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T05:49:48,083 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:48,083 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:48,083 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T05:49:48,084 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T05:49:48,084 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1158063075, stopped=false 2024-12-07T05:49:48,084 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=16b2ce7304d2,36221,1733550585746 2024-12-07T05:49:48,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:48,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:48,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:48,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T05:49:48,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:48,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, 
quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:48,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:48,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:48,153 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T05:49:48,154 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T05:49:48,154 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T05:49:48,154 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:48,154 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:48,155 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:48,155 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:48,155 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b2ce7304d2,46815,1733550585889' ***** 2024-12-07T05:49:48,155 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T05:49:48,155 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b2ce7304d2,35779,1733550585922' ***** 2024-12-07T05:49:48,155 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T05:49:48,155 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T05:49:48,156 INFO [RS:0;16b2ce7304d2:46815 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T05:49:48,156 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T05:49:48,156 INFO [RS:0;16b2ce7304d2:46815 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-07T05:49:48,156 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(3091): Received CLOSE for d2ed1836964bedf9bbae1b9174957684 2024-12-07T05:49:48,156 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T05:49:48,156 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b2ce7304d2,41867,1733550585957' ***** 2024-12-07T05:49:48,156 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T05:49:48,156 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(959): stopping server 16b2ce7304d2,46815,1733550585889 2024-12-07T05:49:48,156 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T05:49:48,156 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T05:49:48,156 INFO [RS:0;16b2ce7304d2:46815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;16b2ce7304d2:46815. 2024-12-07T05:49:48,156 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T05:49:48,156 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d2ed1836964bedf9bbae1b9174957684, disabling compactions & flushes 2024-12-07T05:49:48,157 DEBUG [RS:0;16b2ce7304d2:46815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T05:49:48,157 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T05:49:48,157 DEBUG [RS:0;16b2ce7304d2:46815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:48,157 INFO [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 
2024-12-07T05:49:48,157 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T05:49:48,157 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 2024-12-07T05:49:48,157 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. after waiting 0 ms 2024-12-07T05:49:48,157 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T05:49:48,157 DEBUG [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(1325): Online Regions={d2ed1836964bedf9bbae1b9174957684=TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684.} 2024-12-07T05:49:48,157 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 2024-12-07T05:49:48,157 DEBUG [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(1351): Waiting on d2ed1836964bedf9bbae1b9174957684 2024-12-07T05:49:48,157 INFO [RS:2;16b2ce7304d2:41867 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T05:49:48,157 INFO [RS:2;16b2ce7304d2:41867 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T05:49:48,157 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.HRegionServer(959): stopping server 16b2ce7304d2,41867,1733550585957 2024-12-07T05:49:48,157 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T05:49:48,158 INFO [RS:2;16b2ce7304d2:41867 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;16b2ce7304d2:41867. 
2024-12-07T05:49:48,158 DEBUG [RS:2;16b2ce7304d2:41867 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T05:49:48,158 DEBUG [RS:2;16b2ce7304d2:41867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:48,158 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.HRegionServer(976): stopping server 16b2ce7304d2,41867,1733550585957; all regions closed. 2024-12-07T05:49:48,158 INFO [RS:1;16b2ce7304d2:35779 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T05:49:48,158 INFO [RS:1;16b2ce7304d2:35779 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T05:49:48,158 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(959): stopping server 16b2ce7304d2,35779,1733550585922 2024-12-07T05:49:48,158 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T05:49:48,159 INFO [RS:1;16b2ce7304d2:35779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;16b2ce7304d2:35779. 
2024-12-07T05:49:48,159 DEBUG [RS:1;16b2ce7304d2:35779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T05:49:48,159 DEBUG [RS:1;16b2ce7304d2:35779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:48,159 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T05:49:48,159 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T05:49:48,159 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-07T05:49:48,159 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T05:49:48,160 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T05:49:48,160 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,160 DEBUG [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-07T05:49:48,160 DEBUG [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T05:49:48,160 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,160 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,160 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T05:49:48,160 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,160 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T05:49:48,160 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T05:49:48,160 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,160 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T05:49:48,161 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T05:49:48,161 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-07T05:49:48,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741835_1011 (size=93) 2024-12-07T05:49:48,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741835_1011 (size=93) 2024-12-07T05:49:48,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741835_1011 (size=93) 2024-12-07T05:49:48,165 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/default/TestHBaseWalOnEC/d2ed1836964bedf9bbae1b9174957684/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T05:49:48,167 INFO [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 
2024-12-07T05:49:48,167 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d2ed1836964bedf9bbae1b9174957684: Waiting for close lock at 1733550588156Running coprocessor pre-close hooks at 1733550588156Disabling compacts and flushes for region at 1733550588156Disabling writes for close at 1733550588157 (+1 ms)Writing region close event to WAL at 1733550588158 (+1 ms)Running coprocessor post-close hooks at 1733550588166 (+8 ms)Closed at 1733550588166 2024-12-07T05:49:48,167 DEBUG [RS_CLOSE_REGION-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684. 2024-12-07T05:49:48,167 DEBUG [RS:2;16b2ce7304d2:41867 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/oldWALs 2024-12-07T05:49:48,167 INFO [RS:2;16b2ce7304d2:41867 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 16b2ce7304d2%2C41867%2C1733550585957:(num 1733550586641) 2024-12-07T05:49:48,167 DEBUG [RS:2;16b2ce7304d2:41867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:48,167 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T05:49:48,167 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T05:49:48,167 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.ChoreService(370): Chore service for: regionserver/16b2ce7304d2:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T05:49:48,167 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T05:49:48,167 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T05:49:48,167 INFO [regionserver/16b2ce7304d2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T05:49:48,167 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-07T05:49:48,168 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T05:49:48,168 INFO [RS:2;16b2ce7304d2:41867 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41867 2024-12-07T05:49:48,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b2ce7304d2,41867,1733550585957 2024-12-07T05:49:48,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T05:49:48,178 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T05:49:48,178 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/.tmp/info/6827ecdb98ec4f1a9dd40965d45a0790 is 153, key is TestHBaseWalOnEC,,1733550587101.d2ed1836964bedf9bbae1b9174957684./info:regioninfo/1733550587483/Put/seqid=0 2024-12-07T05:49:48,178 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007fa9c08f4f20@7edcbaf rejected from java.util.concurrent.ThreadPoolExecutor@3d25f8a[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-07T05:49:48,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741840_1016 (size=6637) 2024-12-07T05:49:48,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741840_1016 (size=6637) 2024-12-07T05:49:48,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741840_1016 (size=6637) 2024-12-07T05:49:48,186 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/.tmp/info/6827ecdb98ec4f1a9dd40965d45a0790 2024-12-07T05:49:48,186 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b2ce7304d2,41867,1733550585957] 2024-12-07T05:49:48,195 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b2ce7304d2,41867,1733550585957 already deleted, retry=false 2024-12-07T05:49:48,195 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b2ce7304d2,41867,1733550585957 expired; onlineServers=2 2024-12-07T05:49:48,200 INFO [regionserver/16b2ce7304d2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T05:49:48,206 INFO [regionserver/16b2ce7304d2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T05:49:48,208 INFO [regionserver/16b2ce7304d2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T05:49:48,210 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/.tmp/ns/5b00446f36f2475cb08d74b22affea54 is 43, key is default/ns:d/1733550587046/Put/seqid=0 2024-12-07T05:49:48,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741841_1017 (size=5153) 2024-12-07T05:49:48,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741841_1017 (size=5153) 2024-12-07T05:49:48,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741841_1017 (size=5153) 2024-12-07T05:49:48,216 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/.tmp/ns/5b00446f36f2475cb08d74b22affea54 2024-12-07T05:49:48,238 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell 
in hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/.tmp/table/c2a155d8699b4a16afcea6ed8335a285 is 52, key is TestHBaseWalOnEC/table:state/1733550587499/Put/seqid=0 2024-12-07T05:49:48,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741842_1018 (size=5249) 2024-12-07T05:49:48,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741842_1018 (size=5249) 2024-12-07T05:49:48,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741842_1018 (size=5249) 2024-12-07T05:49:48,245 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/.tmp/table/c2a155d8699b4a16afcea6ed8335a285 2024-12-07T05:49:48,255 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/.tmp/info/6827ecdb98ec4f1a9dd40965d45a0790 as hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/info/6827ecdb98ec4f1a9dd40965d45a0790 2024-12-07T05:49:48,264 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/info/6827ecdb98ec4f1a9dd40965d45a0790, entries=10, sequenceid=11, filesize=6.5 K 2024-12-07T05:49:48,265 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/.tmp/ns/5b00446f36f2475cb08d74b22affea54 as hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/ns/5b00446f36f2475cb08d74b22affea54 2024-12-07T05:49:48,274 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/ns/5b00446f36f2475cb08d74b22affea54, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T05:49:48,275 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/.tmp/table/c2a155d8699b4a16afcea6ed8335a285 as hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/table/c2a155d8699b4a16afcea6ed8335a285 2024-12-07T05:49:48,283 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/table/c2a155d8699b4a16afcea6ed8335a285, entries=2, sequenceid=11, filesize=5.1 K 2024-12-07T05:49:48,285 INFO 
[RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 124ms, sequenceid=11, compaction requested=false 2024-12-07T05:49:48,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:48,287 INFO [RS:2;16b2ce7304d2:41867 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T05:49:48,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41867-0x101afa04a420003, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:48,287 INFO [RS:2;16b2ce7304d2:41867 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b2ce7304d2,41867,1733550585957; zookeeper connection closed. 2024-12-07T05:49:48,287 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@977173c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@977173c 2024-12-07T05:49:48,290 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T05:49:48,291 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T05:49:48,291 INFO [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T05:49:48,291 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733550588160Running coprocessor pre-close hooks at 1733550588160Disabling compacts and flushes for region at 1733550588160Disabling writes for close at 1733550588161 (+1 ms)Obtaining lock to block concurrent updates at 1733550588161Preparing flush snapshotting stores in 1588230740 at 1733550588161Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733550588161Flushing stores of hbase:meta,,1.1588230740 at 1733550588163 (+2 ms)Flushing 1588230740/info: creating writer at 1733550588163Flushing 1588230740/info: appending metadata at 1733550588178 (+15 ms)Flushing 1588230740/info: closing flushed file at 1733550588178Flushing 1588230740/ns: creating writer at 1733550588192 (+14 ms)Flushing 1588230740/ns: appending metadata at 1733550588209 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733550588209Flushing 1588230740/table: creating writer at 1733550588223 (+14 ms)Flushing 1588230740/table: appending metadata at 1733550588238 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733550588238Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d556214: reopening flushed file at 1733550588254 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a1b93f4: reopening flushed file at 1733550588264 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@599e44fc: reopening flushed file at 1733550588274 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 124ms, sequenceid=11, compaction requested=false at 1733550588285 (+11 ms)Writing region close event to WAL at 1733550588286 (+1 ms)Running coprocessor post-close hooks at 1733550588291 (+5 ms)Closed at 1733550588291 2024-12-07T05:49:48,292 DEBUG [RS_CLOSE_META-regionserver/16b2ce7304d2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T05:49:48,357 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(976): stopping server 16b2ce7304d2,46815,1733550585889; all regions closed. 2024-12-07T05:49:48,358 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,358 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,358 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,358 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,358 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,360 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(976): stopping server 16b2ce7304d2,35779,1733550585922; all regions closed. 2024-12-07T05:49:48,361 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741834_1010 (size=1298) 2024-12-07T05:49:48,361 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,361 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741834_1010 (size=1298) 2024-12-07T05:49:48,361 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,361 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741834_1010 (size=1298) 2024-12-07T05:49:48,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741836_1012 (size=2751) 2024-12-07T05:49:48,366 DEBUG [RS:0;16b2ce7304d2:46815 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/oldWALs 2024-12-07T05:49:48,366 INFO [RS:0;16b2ce7304d2:46815 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 16b2ce7304d2%2C46815%2C1733550585889:(num 1733550586641) 2024-12-07T05:49:48,366 DEBUG [RS:0;16b2ce7304d2:46815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:48,366 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T05:49:48,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741836_1012 (size=2751) 2024-12-07T05:49:48,366 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T05:49:48,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741836_1012 (size=2751) 2024-12-07T05:49:48,366 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.ChoreService(370): Chore 
service for: regionserver/16b2ce7304d2:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T05:49:48,366 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T05:49:48,366 INFO [regionserver/16b2ce7304d2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T05:49:48,366 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T05:49:48,366 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T05:49:48,366 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T05:49:48,367 INFO [RS:0;16b2ce7304d2:46815 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46815 2024-12-07T05:49:48,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T05:49:48,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b2ce7304d2,46815,1733550585889 2024-12-07T05:49:48,387 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T05:49:48,387 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007fa9c08f4f20@22cc0c37 rejected from java.util.concurrent.ThreadPoolExecutor@26f38cb6[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-07T05:49:48,395 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b2ce7304d2,46815,1733550585889] 2024-12-07T05:49:48,403 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b2ce7304d2,46815,1733550585889 already deleted, retry=false 2024-12-07T05:49:48,403 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b2ce7304d2,46815,1733550585889 expired; onlineServers=1 2024-12-07T05:49:48,495 INFO [RS:0;16b2ce7304d2:46815 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T05:49:48,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:48,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x101afa04a420001, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:48,495 INFO [RS:0;16b2ce7304d2:46815 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b2ce7304d2,46815,1733550585889; zookeeper connection closed. 2024-12-07T05:49:48,496 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1d485c19 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1d485c19 2024-12-07T05:49:48,498 INFO [regionserver/16b2ce7304d2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T05:49:48,498 INFO [regionserver/16b2ce7304d2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T05:49:48,775 DEBUG [RS:1;16b2ce7304d2:35779 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/oldWALs 2024-12-07T05:49:48,775 INFO [RS:1;16b2ce7304d2:35779 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 16b2ce7304d2%2C35779%2C1733550585922.meta:.meta(num 1733550586970) 2024-12-07T05:49:48,776 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,776 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,776 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,776 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,776 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741833_1009 (size=93) 2024-12-07T05:49:48,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741833_1009 (size=93) 2024-12-07T05:49:48,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741833_1009 (size=93) 2024-12-07T05:49:48,783 DEBUG [RS:1;16b2ce7304d2:35779 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to 
/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/oldWALs 2024-12-07T05:49:48,783 INFO [RS:1;16b2ce7304d2:35779 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 16b2ce7304d2%2C35779%2C1733550585922:(num 1733550586636) 2024-12-07T05:49:48,783 DEBUG [RS:1;16b2ce7304d2:35779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T05:49:48,783 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T05:49:48,783 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T05:49:48,783 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.ChoreService(370): Chore service for: regionserver/16b2ce7304d2:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T05:49:48,783 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T05:49:48,783 INFO [regionserver/16b2ce7304d2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T05:49:48,784 INFO [RS:1;16b2ce7304d2:35779 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35779 2024-12-07T05:49:48,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b2ce7304d2,35779,1733550585922 2024-12-07T05:49:48,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T05:49:48,820 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T05:49:48,828 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b2ce7304d2,35779,1733550585922] 2024-12-07T05:49:48,836 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b2ce7304d2,35779,1733550585922 already deleted, retry=false 2024-12-07T05:49:48,837 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b2ce7304d2,35779,1733550585922 expired; onlineServers=0 2024-12-07T05:49:48,837 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '16b2ce7304d2,36221,1733550585746' ***** 2024-12-07T05:49:48,837 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T05:49:48,837 INFO [M:0;16b2ce7304d2:36221 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T05:49:48,837 INFO [M:0;16b2ce7304d2:36221 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T05:49:48,837 DEBUG [M:0;16b2ce7304d2:36221 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T05:49:48,837 DEBUG [M:0;16b2ce7304d2:36221 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T05:49:48,837 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T05:49:48,837 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster-HFileCleaner.large.0-1733550586289 {}] cleaner.HFileCleaner(306): Exit Thread[master/16b2ce7304d2:0:becomeActiveMaster-HFileCleaner.large.0-1733550586289,5,FailOnTimeoutGroup] 2024-12-07T05:49:48,837 DEBUG [master/16b2ce7304d2:0:becomeActiveMaster-HFileCleaner.small.0-1733550586289 {}] cleaner.HFileCleaner(306): Exit Thread[master/16b2ce7304d2:0:becomeActiveMaster-HFileCleaner.small.0-1733550586289,5,FailOnTimeoutGroup] 2024-12-07T05:49:48,838 INFO [M:0;16b2ce7304d2:36221 {}] hbase.ChoreService(370): Chore service for: master/16b2ce7304d2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T05:49:48,838 INFO [M:0;16b2ce7304d2:36221 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T05:49:48,839 DEBUG [M:0;16b2ce7304d2:36221 {}] master.HMaster(1795): Stopping service threads 2024-12-07T05:49:48,839 INFO [M:0;16b2ce7304d2:36221 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T05:49:48,839 INFO [M:0;16b2ce7304d2:36221 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T05:49:48,839 INFO [M:0;16b2ce7304d2:36221 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T05:49:48,840 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T05:49:48,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T05:49:48,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T05:49:48,845 DEBUG [M:0;16b2ce7304d2:36221 {}] zookeeper.ZKUtil(347): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T05:49:48,845 WARN [M:0;16b2ce7304d2:36221 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T05:49:48,846 INFO [M:0;16b2ce7304d2:36221 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/.lastflushedseqids 2024-12-07T05:49:48,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741843_1019 (size=127) 2024-12-07T05:49:48,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741843_1019 (size=127) 2024-12-07T05:49:48,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741843_1019 (size=127) 2024-12-07T05:49:48,856 INFO [M:0;16b2ce7304d2:36221 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T05:49:48,856 INFO [M:0;16b2ce7304d2:36221 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T05:49:48,856 DEBUG 
[M:0;16b2ce7304d2:36221 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T05:49:48,856 INFO [M:0;16b2ce7304d2:36221 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T05:49:48,856 DEBUG [M:0;16b2ce7304d2:36221 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T05:49:48,856 DEBUG [M:0;16b2ce7304d2:36221 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T05:49:48,856 DEBUG [M:0;16b2ce7304d2:36221 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T05:49:48,857 INFO [M:0;16b2ce7304d2:36221 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-07T05:49:48,873 DEBUG [M:0;16b2ce7304d2:36221 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2c095bf014c14d7aa34a0f5bbfa3cb02 is 82, key is hbase:meta,,1/info:regioninfo/1733550587005/Put/seqid=0 2024-12-07T05:49:48,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741844_1020 (size=5672) 2024-12-07T05:49:48,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741844_1020 (size=5672) 2024-12-07T05:49:48,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741844_1020 (size=5672) 2024-12-07T05:49:48,880 INFO [M:0;16b2ce7304d2:36221 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2c095bf014c14d7aa34a0f5bbfa3cb02 2024-12-07T05:49:48,900 DEBUG [M:0;16b2ce7304d2:36221 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7e980a838f05450f87d7e388a44bfd5d is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733550587507/Put/seqid=0 2024-12-07T05:49:48,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741845_1021 (size=6439) 2024-12-07T05:49:48,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741845_1021 (size=6439) 2024-12-07T05:49:48,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741845_1021 (size=6439) 2024-12-07T05:49:48,907 INFO [M:0;16b2ce7304d2:36221 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7e980a838f05450f87d7e388a44bfd5d 2024-12-07T05:49:48,927 DEBUG 
[M:0;16b2ce7304d2:36221 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fe33fafd24254d93be6f23313087f6f9 is 69, key is 16b2ce7304d2,35779,1733550585922/rs:state/1733550586448/Put/seqid=0 2024-12-07T05:49:48,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:48,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35779-0x101afa04a420002, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:48,929 INFO [RS:1;16b2ce7304d2:35779 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T05:49:48,929 INFO [RS:1;16b2ce7304d2:35779 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b2ce7304d2,35779,1733550585922; zookeeper connection closed. 2024-12-07T05:49:48,929 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@13f8692e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@13f8692e 2024-12-07T05:49:48,929 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-07T05:49:48,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741846_1022 (size=5294) 2024-12-07T05:49:48,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741846_1022 (size=5294) 2024-12-07T05:49:48,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741846_1022 (size=5294) 2024-12-07T05:49:48,934 INFO [M:0;16b2ce7304d2:36221 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fe33fafd24254d93be6f23313087f6f9 2024-12-07T05:49:48,940 DEBUG [M:0;16b2ce7304d2:36221 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2c095bf014c14d7aa34a0f5bbfa3cb02 as hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2c095bf014c14d7aa34a0f5bbfa3cb02 2024-12-07T05:49:48,947 INFO [M:0;16b2ce7304d2:36221 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2c095bf014c14d7aa34a0f5bbfa3cb02, entries=8, sequenceid=72, filesize=5.5 K 2024-12-07T05:49:48,948 DEBUG [M:0;16b2ce7304d2:36221 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7e980a838f05450f87d7e388a44bfd5d as 
hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7e980a838f05450f87d7e388a44bfd5d 2024-12-07T05:49:48,955 INFO [M:0;16b2ce7304d2:36221 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7e980a838f05450f87d7e388a44bfd5d, entries=8, sequenceid=72, filesize=6.3 K 2024-12-07T05:49:48,956 DEBUG [M:0;16b2ce7304d2:36221 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fe33fafd24254d93be6f23313087f6f9 as hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fe33fafd24254d93be6f23313087f6f9 2024-12-07T05:49:48,963 INFO [M:0;16b2ce7304d2:36221 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42677/user/jenkins/test-data/befe2273-29f2-c7c5-fd12-030f72b174d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fe33fafd24254d93be6f23313087f6f9, entries=3, sequenceid=72, filesize=5.2 K 2024-12-07T05:49:48,964 INFO [M:0;16b2ce7304d2:36221 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=72, compaction requested=false 2024-12-07T05:49:48,966 INFO [M:0;16b2ce7304d2:36221 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T05:49:48,966 DEBUG [M:0;16b2ce7304d2:36221 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733550588856Disabling compacts and flushes for region at 1733550588856Disabling writes for close at 1733550588856Obtaining lock to block concurrent updates at 1733550588857 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733550588857Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733550588857Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733550588858 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733550588858Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733550588872 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733550588872Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733550588886 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733550588900 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733550588900Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733550588912 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733550588927 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733550588927Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2608a934: reopening flushed file at 1733550588939 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4012dbdf: reopening flushed file at 1733550588947 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@325c33bf: reopening flushed file at 1733550588955 (+8 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=72, compaction requested=false at 1733550588964 (+9 ms)Writing region close event to WAL at 1733550588966 (+2 ms)Closed at 1733550588966 2024-12-07T05:49:48,966 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,966 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,966 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,967 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T05:49:48,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42355 is added to blk_1073741830_1006 (size=32674) 2024-12-07T05:49:48,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36487 is added to blk_1073741830_1006 (size=32674) 2024-12-07T05:49:48,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741830_1006 (size=32674) 2024-12-07T05:49:48,970 INFO [M:0;16b2ce7304d2:36221 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T05:49:48,970 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T05:49:48,970 INFO [M:0;16b2ce7304d2:36221 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36221 2024-12-07T05:49:48,970 INFO [M:0;16b2ce7304d2:36221 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T05:49:49,094 INFO [M:0;16b2ce7304d2:36221 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T05:49:49,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:49,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36221-0x101afa04a420000, quorum=127.0.0.1:64227, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T05:49:49,101 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74fb60c3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T05:49:49,102 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1380d9e3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T05:49:49,102 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T05:49:49,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16e08e17{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T05:49:49,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78faf64e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/hadoop.log.dir/,STOPPED} 2024-12-07T05:49:49,104 WARN [BP-788461391-172.17.0.2-1733550583832 heartbeating to localhost/127.0.0.1:42677 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T05:49:49,104 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T05:49:49,104 WARN [BP-788461391-172.17.0.2-1733550583832 heartbeating to localhost/127.0.0.1:42677 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-788461391-172.17.0.2-1733550583832 (Datanode Uuid 4cf1243f-7a6b-4bb1-a0b8-1ad35764c656) service to localhost/127.0.0.1:42677 2024-12-07T05:49:49,104 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T05:49:49,104 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/data/data5/current/BP-788461391-172.17.0.2-1733550583832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T05:49:49,105 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/data/data6/current/BP-788461391-172.17.0.2-1733550583832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T05:49:49,105 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T05:49:49,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d7cac9e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T05:49:49,107 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@785c29e1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T05:49:49,107 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T05:49:49,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3cdd19d5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T05:49:49,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ef06ee6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/hadoop.log.dir/,STOPPED} 2024-12-07T05:49:49,108 WARN [BP-788461391-172.17.0.2-1733550583832 heartbeating to localhost/127.0.0.1:42677 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T05:49:49,108 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T05:49:49,108 WARN [BP-788461391-172.17.0.2-1733550583832 heartbeating to localhost/127.0.0.1:42677 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-788461391-172.17.0.2-1733550583832 (Datanode Uuid 3a8be341-97f1-4a7a-b50f-0bb7383ec952) service to localhost/127.0.0.1:42677
2024-12-07T05:49:49,108 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T05:49:49,109 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/data/data3/current/BP-788461391-172.17.0.2-1733550583832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T05:49:49,109 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/data/data4/current/BP-788461391-172.17.0.2-1733550583832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T05:49:49,109 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T05:49:49,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@501c8baa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T05:49:49,111 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25be713d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T05:49:49,111 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T05:49:49,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47d171d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T05:49:49,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2716dd5e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/hadoop.log.dir/,STOPPED}
2024-12-07T05:49:49,112 WARN [BP-788461391-172.17.0.2-1733550583832 heartbeating to localhost/127.0.0.1:42677 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T05:49:49,112 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T05:49:49,112 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T05:49:49,112 WARN [BP-788461391-172.17.0.2-1733550583832 heartbeating to localhost/127.0.0.1:42677 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-788461391-172.17.0.2-1733550583832 (Datanode Uuid 4c20cca7-e729-43ff-ac15-eb243209ffe9) service to localhost/127.0.0.1:42677
2024-12-07T05:49:49,113 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/data/data1/current/BP-788461391-172.17.0.2-1733550583832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T05:49:49,113 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/cluster_b9ed59cb-9a45-c613-76fe-db0f3d760f17/data/data2/current/BP-788461391-172.17.0.2-1733550583832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T05:49:49,113 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T05:49:49,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b0c25ba{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-07T05:49:49,118 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a8e73dc{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T05:49:49,118 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T05:49:49,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a627a74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T05:49:49,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d772bf2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c44ce9-788d-29d3-4c40-67853e241664/hadoop.log.dir/,STOPPED}
2024-12-07T05:49:49,126 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-07T05:49:49,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-07T05:49:49,162 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=151 (was 89) - Thread LEAK? -, OpenFileDescriptor=516 (was 447) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=257 (was 253) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8019 (was 8216)