2024-12-08 00:46:04,043 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-08 00:46:04,054 main DEBUG Took 0.009193 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-08 00:46:04,055 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-08 00:46:04,055 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-08 00:46:04,056 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-08 00:46:04,057 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:04,071 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-08 00:46:04,082 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,084 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:04,085 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,085 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:04,086 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,086 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:04,088 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,088 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:04,089 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,089 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:04,090 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,091 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:04,091 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,092 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-08 00:46:04,092 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,093 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:04,093 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,094 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:04,094 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,095 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:04,095 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,096 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:04,096 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,097 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 00:46:04,097 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,097 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-08 00:46:04,099 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 00:46:04,101 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-08 00:46:04,103 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-08 00:46:04,104 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-08 00:46:04,105 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-08 00:46:04,106 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-08 00:46:04,115 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-08 00:46:04,118 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-08 00:46:04,120 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-08 00:46:04,120 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-08 00:46:04,120 main DEBUG createAppenders(={Console}) 2024-12-08 00:46:04,121 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-08 00:46:04,121 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-08 00:46:04,122 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-08 00:46:04,122 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-08 00:46:04,122 main DEBUG OutputStream closed 2024-12-08 00:46:04,123 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-08 00:46:04,123 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-08 00:46:04,123 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-08 00:46:04,185 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-08 00:46:04,187 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-08 00:46:04,188 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-08 00:46:04,189 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-08 00:46:04,189 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-08 00:46:04,190 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-08 00:46:04,190 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-08 00:46:04,190 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-08 00:46:04,191 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-08 00:46:04,191 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-08 00:46:04,191 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-08 00:46:04,192 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-08 00:46:04,192 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-08 00:46:04,192 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-08 00:46:04,193 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-08 00:46:04,193 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-08 00:46:04,193 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-08 00:46:04,194 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-08 00:46:04,196 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-08 00:46:04,196 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-08 00:46:04,197 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-08 00:46:04,197 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-08T00:46:04,212 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-08 00:46:04,215 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-08 00:46:04,215 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
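The "Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins" line above is emitted by HBaseClassTestRule, the JUnit class rule that HBase test classes register to enforce a per-class timeout. A minimal sketch of that wiring follows; it assumes the standard rule/category pattern and is not the actual TestHBaseWalOnEC source (the category shown is an illustrative assumption).

// Minimal sketch of the HBaseClassTestRule wiring that produces the
// "Test class ... timeout: 26 mins" log line above. Not the real
// TestHBaseWalOnEC source; the category annotation is an assumption.
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;

@Category(LargeTests.class)
public class TestHBaseWalOnEC {
  // forClass() derives the per-class timeout from the test's category and
  // logs it through HBaseClassTestRule when the class starts.
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestHBaseWalOnEC.class);

  // ... test methods such as testReadWrite() would follow here ...
}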
2024-12-08T00:46:04,425 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2 2024-12-08T00:46:04,446 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d, deleteOnExit=true 2024-12-08T00:46:04,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/test.cache.data in system properties and HBase conf 2024-12-08T00:46:04,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T00:46:04,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/hadoop.log.dir in system properties and HBase conf 2024-12-08T00:46:04,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T00:46:04,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T00:46:04,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T00:46:04,529 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-08T00:46:04,607 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T00:46:04,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:46:04,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:46:04,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T00:46:04,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:46:04,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T00:46:04,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T00:46:04,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:46:04,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:46:04,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T00:46:04,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/nfs.dump.dir in system properties and HBase conf 2024-12-08T00:46:04,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/java.io.tmpdir in system properties and HBase conf 2024-12-08T00:46:04,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:46:04,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T00:46:04,615 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T00:46:05,538 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-08T00:46:05,599 INFO [Time-limited test {}] log.Log(170): Logging initialized @2126ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-08T00:46:05,660 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:05,711 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:46:05,727 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:46:05,728 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:46:05,729 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:46:05,740 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:05,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:46:05,744 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:46:05,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/java.io.tmpdir/jetty-localhost-38571-hadoop-hdfs-3_4_1-tests_jar-_-any-11004256306944695935/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:46:05,904 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:38571} 2024-12-08T00:46:05,905 INFO [Time-limited test {}] server.Server(415): Started @2433ms 2024-12-08T00:46:06,442 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:06,448 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:46:06,449 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:46:06,449 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:46:06,449 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:46:06,450 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:46:06,450 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:46:06,542 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/java.io.tmpdir/jetty-localhost-40013-hadoop-hdfs-3_4_1-tests_jar-_-any-1935602735789643771/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:06,543 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:40013} 2024-12-08T00:46:06,543 INFO [Time-limited test {}] server.Server(415): Started @3071ms 2024-12-08T00:46:06,588 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:46:06,683 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:06,689 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:46:06,691 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:46:06,691 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:46:06,691 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:46:06,695 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:46:06,696 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:46:06,792 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/java.io.tmpdir/jetty-localhost-33211-hadoop-hdfs-3_4_1-tests_jar-_-any-6562500572656139115/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:06,793 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:33211} 2024-12-08T00:46:06,793 INFO [Time-limited test {}] server.Server(415): Started @3321ms 2024-12-08T00:46:06,795 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:46:06,835 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:06,840 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:46:06,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:46:06,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:46:06,842 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:46:06,842 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:46:06,843 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:46:06,935 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/java.io.tmpdir/jetty-localhost-34305-hadoop-hdfs-3_4_1-tests_jar-_-any-8210893411248026718/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:06,936 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:34305} 2024-12-08T00:46:06,936 INFO [Time-limited test {}] server.Server(415): Started @3464ms 2024-12-08T00:46:06,938 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
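The Jetty servers started above (one "hdfs" web app and three "datanode" web apps across this and the preceding lines) belong to the embedded HDFS that backs the test. A hedged sketch of how such a cluster is normally brought up with MiniDFSCluster from the hadoop-hdfs test jar follows; it is an illustration under that assumption, not the exact code path HBaseTestingUtil uses.

// Hedged sketch: an embedded HDFS like the one whose namenode and three
// datanode web servers start above is normally created with MiniDFSCluster
// (hadoop-hdfs test jar). Illustration only, not the HBaseTestingUtil internals.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniHdfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)   // matches the three datanode web contexts in the log
        .build();
    cluster.waitActive();  // wait for the namenode and all datanodes to register
    try {
      System.out.println("NameNode RPC port: " + cluster.getNameNodePort());
    } finally {
      cluster.shutdown();
    }
  }
}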
2024-12-08T00:46:07,902 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/data/data1/current/BP-356801691-172.17.0.2-1733618765086/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:07,902 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/data/data2/current/BP-356801691-172.17.0.2-1733618765086/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:07,902 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/data/data3/current/BP-356801691-172.17.0.2-1733618765086/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:07,902 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/data/data4/current/BP-356801691-172.17.0.2-1733618765086/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:07,927 WARN [Thread-136 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/data/data5/current/BP-356801691-172.17.0.2-1733618765086/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:07,928 WARN [Thread-137 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/data/data6/current/BP-356801691-172.17.0.2-1733618765086/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:07,943 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T00:46:07,943 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T00:46:07,955 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:46:07,990 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50ae6e9113fb7433 with lease ID 0xe50d5edde396aca0: Processing first storage report for DS-db17d945-d425-4a3e-9c52-f7aeafd06e0d from datanode DatanodeRegistration(127.0.0.1:34549, datanodeUuid=f32052f7-e40d-41db-922e-ecaf018ca26e, infoPort=38021, infoSecurePort=0, ipcPort=38659, storageInfo=lv=-57;cid=testClusterID;nsid=469615171;c=1733618765086) 2024-12-08T00:46:07,991 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x50ae6e9113fb7433 with lease ID 0xe50d5edde396aca0: from storage DS-db17d945-d425-4a3e-9c52-f7aeafd06e0d node DatanodeRegistration(127.0.0.1:34549, datanodeUuid=f32052f7-e40d-41db-922e-ecaf018ca26e, infoPort=38021, infoSecurePort=0, ipcPort=38659, storageInfo=lv=-57;cid=testClusterID;nsid=469615171;c=1733618765086), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T00:46:07,991 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x28742b565818aa1c with lease ID 0xe50d5edde396aca1: Processing first storage report for DS-adecbfca-b5d6-49b5-afa5-f05e8830b783 from datanode DatanodeRegistration(127.0.0.1:41981, datanodeUuid=d01efd29-d8b0-4fa5-8dc1-8b567d2cbc62, infoPort=40355, infoSecurePort=0, ipcPort=40011, storageInfo=lv=-57;cid=testClusterID;nsid=469615171;c=1733618765086) 2024-12-08T00:46:07,991 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x28742b565818aa1c with lease ID 0xe50d5edde396aca1: from storage DS-adecbfca-b5d6-49b5-afa5-f05e8830b783 node DatanodeRegistration(127.0.0.1:41981, datanodeUuid=d01efd29-d8b0-4fa5-8dc1-8b567d2cbc62, infoPort=40355, infoSecurePort=0, ipcPort=40011, storageInfo=lv=-57;cid=testClusterID;nsid=469615171;c=1733618765086), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:46:07,991 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf117e8cb2fe0e24d with lease ID 0xe50d5edde396ac9f: Processing first storage report for DS-8e40799e-57e0-4759-b491-cb8bbcd0ba02 from datanode DatanodeRegistration(127.0.0.1:40453, datanodeUuid=e64a82fd-317a-45dd-8076-62a80a46ad24, infoPort=45419, infoSecurePort=0, ipcPort=38641, storageInfo=lv=-57;cid=testClusterID;nsid=469615171;c=1733618765086) 2024-12-08T00:46:07,991 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf117e8cb2fe0e24d with lease ID 0xe50d5edde396ac9f: from storage DS-8e40799e-57e0-4759-b491-cb8bbcd0ba02 node DatanodeRegistration(127.0.0.1:40453, datanodeUuid=e64a82fd-317a-45dd-8076-62a80a46ad24, infoPort=45419, infoSecurePort=0, ipcPort=38641, storageInfo=lv=-57;cid=testClusterID;nsid=469615171;c=1733618765086), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:46:07,992 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50ae6e9113fb7433 with lease ID 0xe50d5edde396aca0: Processing first storage report for DS-770e5b0a-9b33-4469-94ba-2663dd4d7ea5 from datanode DatanodeRegistration(127.0.0.1:34549, datanodeUuid=f32052f7-e40d-41db-922e-ecaf018ca26e, infoPort=38021, infoSecurePort=0, ipcPort=38659, storageInfo=lv=-57;cid=testClusterID;nsid=469615171;c=1733618765086) 2024-12-08T00:46:07,992 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x50ae6e9113fb7433 with lease ID 0xe50d5edde396aca0: from storage DS-770e5b0a-9b33-4469-94ba-2663dd4d7ea5 node DatanodeRegistration(127.0.0.1:34549, datanodeUuid=f32052f7-e40d-41db-922e-ecaf018ca26e, infoPort=38021, infoSecurePort=0, ipcPort=38659, storageInfo=lv=-57;cid=testClusterID;nsid=469615171;c=1733618765086), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:46:07,992 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x28742b565818aa1c with lease ID 0xe50d5edde396aca1: Processing first storage report for DS-6b7944a8-337b-4ab7-9952-1fc5c514efcc from datanode DatanodeRegistration(127.0.0.1:41981, datanodeUuid=d01efd29-d8b0-4fa5-8dc1-8b567d2cbc62, infoPort=40355, infoSecurePort=0, ipcPort=40011, storageInfo=lv=-57;cid=testClusterID;nsid=469615171;c=1733618765086) 2024-12-08T00:46:07,992 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x28742b565818aa1c with lease ID 0xe50d5edde396aca1: from storage DS-6b7944a8-337b-4ab7-9952-1fc5c514efcc node DatanodeRegistration(127.0.0.1:41981, datanodeUuid=d01efd29-d8b0-4fa5-8dc1-8b567d2cbc62, infoPort=40355, infoSecurePort=0, ipcPort=40011, storageInfo=lv=-57;cid=testClusterID;nsid=469615171;c=1733618765086), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:46:07,992 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf117e8cb2fe0e24d with lease ID 0xe50d5edde396ac9f: Processing first storage report for DS-8c987d9b-2f06-413a-95d2-e8e84a905c91 from datanode DatanodeRegistration(127.0.0.1:40453, datanodeUuid=e64a82fd-317a-45dd-8076-62a80a46ad24, infoPort=45419, infoSecurePort=0, ipcPort=38641, storageInfo=lv=-57;cid=testClusterID;nsid=469615171;c=1733618765086) 2024-12-08T00:46:07,992 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf117e8cb2fe0e24d with lease ID 0xe50d5edde396ac9f: from storage DS-8c987d9b-2f06-413a-95d2-e8e84a905c91 node DatanodeRegistration(127.0.0.1:40453, datanodeUuid=e64a82fd-317a-45dd-8076-62a80a46ad24, infoPort=45419, infoSecurePort=0, ipcPort=38641, storageInfo=lv=-57;cid=testClusterID;nsid=469615171;c=1733618765086), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:46:08,019 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2 2024-12-08T00:46:08,080 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-08T00:46:08,127 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=159, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=183, ProcessCount=11, AvailableMemoryMB=18113 2024-12-08T00:46:08,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T00:46:08,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-08T00:46:08,201 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/zookeeper_0, clientPort=55931, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T00:46:08,211 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55931 2024-12-08T00:46:08,220 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:08,223 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:08,316 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:08,316 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:08,356 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636387061_22 at /127.0.0.1:60140 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:41981:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60140 dst: /127.0.0.1:41981 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:46:08,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-08T00:46:08,775 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T00:46:08,787 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a with version=8 2024-12-08T00:46:08,787 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/hbase-staging 2024-12-08T00:46:08,864 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-08T00:46:09,102 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:46:09,110 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:09,111 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:09,115 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:46:09,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:09,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:46:09,234 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T00:46:09,293 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-08T00:46:09,301 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-08T00:46:09,305 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:46:09,326 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 19644 (auto-detected) 2024-12-08T00:46:09,327 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-08T00:46:09,342 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40457 2024-12-08T00:46:09,360 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40457 connecting to ZooKeeper ensemble=127.0.0.1:55931 2024-12-08T00:46:09,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:404570x0, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:46:09,487 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40457-0x10002f0d7d50000 connected 2024-12-08T00:46:09,583 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:09,589 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:09,599 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:09,604 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a, hbase.cluster.distributed=false 2024-12-08T00:46:09,627 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:46:09,633 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40457 2024-12-08T00:46:09,633 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40457 2024-12-08T00:46:09,634 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40457 2024-12-08T00:46:09,634 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40457 2024-12-08T00:46:09,635 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40457 2024-12-08T00:46:09,722 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:46:09,724 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:09,724 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:09,724 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:46:09,724 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:09,724 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:46:09,727 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:46:09,729 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:46:09,730 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44379 2024-12-08T00:46:09,731 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44379 connecting to ZooKeeper ensemble=127.0.0.1:55931 2024-12-08T00:46:09,732 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:09,734 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:09,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:443790x0, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:46:09,753 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:09,753 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44379-0x10002f0d7d50001 connected 2024-12-08T00:46:09,758 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:46:09,766 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:46:09,768 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:46:09,774 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:46:09,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44379 2024-12-08T00:46:09,775 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=44379 2024-12-08T00:46:09,776 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44379 2024-12-08T00:46:09,776 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44379 2024-12-08T00:46:09,779 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44379 2024-12-08T00:46:09,793 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:46:09,793 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:09,794 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:09,794 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:46:09,794 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:09,794 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:46:09,794 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:46:09,795 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:46:09,795 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33233 2024-12-08T00:46:09,797 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33233 connecting to ZooKeeper ensemble=127.0.0.1:55931 2024-12-08T00:46:09,798 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:09,799 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:09,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:332330x0, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:46:09,815 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:332330x0, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:09,815 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33233-0x10002f0d7d50002 connected 2024-12-08T00:46:09,815 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, 
blockSize=64 KB 2024-12-08T00:46:09,816 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:46:09,817 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:46:09,819 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:46:09,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33233 2024-12-08T00:46:09,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33233 2024-12-08T00:46:09,821 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33233 2024-12-08T00:46:09,822 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33233 2024-12-08T00:46:09,822 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33233 2024-12-08T00:46:09,837 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:46:09,837 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:09,837 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:09,838 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:46:09,838 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:09,838 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:46:09,838 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:46:09,838 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:46:09,839 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38841 2024-12-08T00:46:09,840 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38841 connecting to ZooKeeper ensemble=127.0.0.1:55931 2024-12-08T00:46:09,841 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:09,843 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:09,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:388410x0, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:46:09,856 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:09,856 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38841-0x10002f0d7d50003 connected 2024-12-08T00:46:09,857 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:46:09,858 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:46:09,859 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:46:09,862 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:46:09,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38841 2024-12-08T00:46:09,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38841 2024-12-08T00:46:09,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38841 2024-12-08T00:46:09,865 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38841 2024-12-08T00:46:09,865 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38841 2024-12-08T00:46:09,880 DEBUG [M:0;0f983e3e5be1:40457 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0f983e3e5be1:40457 2024-12-08T00:46:09,881 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0f983e3e5be1,40457,1733618768954 2024-12-08T00:46:09,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:09,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:09,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
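Side note on the RpcExecutor lines above: the repeated handlerCount=3, numCallQueues=1, maxQueueLength=30 and the priority.RWQ.Fifo read/write split are driven by HBase IPC configuration rather than fixed constants. As a rough, illustrative sketch only (the property names are standard HBase keys, but the values are assumptions chosen to mirror this mini cluster, not settings read out of this run), a test harness could shrink the executors like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class SmallRpcExecutorConfig {
      private SmallRpcExecutorConfig() {}

      // Build a Configuration with deliberately small RPC executors, roughly like the mini cluster above.
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Few handler threads per region server (the log shows handlerCount=3).
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Short call queues; the logged maxQueueLength=30 is what 3 handlers derive by default anyway.
        conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
        // Give the priority executor separate read and write handlers (the RWQ.Fifo lines).
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
        return conf;
      }
    }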
2024-12-08T00:46:09,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:09,896 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0f983e3e5be1,40457,1733618768954 2024-12-08T00:46:09,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:46:09,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:46:09,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:46:09,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:09,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:09,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:09,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:09,923 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T00:46:09,925 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0f983e3e5be1,40457,1733618768954 from backup master directory 2024-12-08T00:46:09,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:09,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0f983e3e5be1,40457,1733618768954 2024-12-08T00:46:09,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-08T00:46:09,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T00:46:09,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T00:46:09,936 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-08T00:46:09,936 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0f983e3e5be1,40457,1733618768954
2024-12-08T00:46:09,938 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-08T00:46:09,940 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-08T00:46:09,994 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/hbase.id] with ID: 2d1b2c2a-5c77-43b0-aa0b-b5232f4fe46d
2024-12-08T00:46:09,994 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/.tmp/hbase.id
2024-12-08T00:46:10,001 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-08T00:46:10,002 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-08T00:46:10,004 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636387061_22 at /127.0.0.1:44062 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:34549:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44062 dst: /127.0.0.1:34549
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:46:10,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_-9223372036854775776_1004 (size=42)
2024-12-08T00:46:10,011 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-08T00:46:10,011 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/.tmp/hbase.id]:[hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/hbase.id]
2024-12-08T00:46:10,053 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T00:46:10,056 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-08T00:46:10,072 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 14ms.
2024-12-08T00:46:10,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T00:46:10,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T00:46:10,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T00:46:10,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T00:46:10,111 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-08T00:46:10,112 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-08T00:46:10,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636387061_22 at /127.0.0.1:60160 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:41981:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60160 dst: /127.0.0.1:41981
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:46:10,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_-9223372036854775760_1006 (size=196)
2024-12-08T00:46:10,121 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
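The recurring 'Cannot allocate parity block' warnings above come from the RS-3-2-1024k erasure coding policy, which needs more datanodes than this three-datanode mini-DFS can offer; the warning itself points at 'hdfs ec -verifyClusterSetup'. A minimal check along those lines with the stock Hadoop 3.x CLI might look like the sketch below (subcommand names as documented upstream; whether disabling the policy is appropriate depends on the environment, so treat this as an assumption, not a fix applied in this run):

    # Verify that the enabled erasure coding policies fit the current cluster topology.
    hdfs ec -verifyClusterSetup

    # Inspect the policies and, if the striped policy cannot be satisfied, disable it.
    hdfs ec -listPolicies
    hdfs ec -disablePolicy -policy RS-3-2-1024k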
2024-12-08T00:46:10,134 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T00:46:10,136 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-08T00:46:10,141 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-08T00:46:10,165 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-08T00:46:10,165 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-08T00:46:10,167 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636387061_22 at /127.0.0.1:44078 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:34549:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44078 dst: /127.0.0.1:34549
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:46:10,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_-9223372036854775744_1008 (size=1189)
2024-12-08T00:46:10,173 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-08T00:46:10,189 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store
2024-12-08T00:46:10,203 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-08T00:46:10,203 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-08T00:46:10,206 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636387061_22 at /127.0.0.1:51924 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40453:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51924 dst: /127.0.0.1:40453
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:46:10,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_-9223372036854775728_1010 (size=34)
2024-12-08T00:46:10,212 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-08T00:46:10,215 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-08T00:46:10,218 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T00:46:10,219 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-08T00:46:10,219 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T00:46:10,219 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T00:46:10,221 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-08T00:46:10,221 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T00:46:10,221 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T00:46:10,222 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733618770219Disabling compacts and flushes for region at 1733618770219Disabling writes for close at 1733618770221 (+2 ms)Writing region close event to WAL at 1733618770221Closed at 1733618770221
2024-12-08T00:46:10,224 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/.initializing
2024-12-08T00:46:10,224 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/WALs/0f983e3e5be1,40457,1733618768954
2024-12-08T00:46:10,231 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-12-08T00:46:10,246 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C40457%2C1733618768954, suffix=, logDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/WALs/0f983e3e5be1,40457,1733618768954, archiveDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/oldWALs, maxLogs=10
2024-12-08T00:46:10,281 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/WALs/0f983e3e5be1,40457,1733618768954/0f983e3e5be1%2C40457%2C1733618768954.1733618770252, exclude list is [], retry=0
2024-12-08T00:46:10,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T00:46:10,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34549,DS-db17d945-d425-4a3e-9c52-f7aeafd06e0d,DISK]
2024-12-08T00:46:10,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41981,DS-adecbfca-b5d6-49b5-afa5-f05e8830b783,DISK]
2024-12-08T00:46:10,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40453,DS-8e40799e-57e0-4759-b491-cb8bbcd0ba02,DISK]
2024-12-08T00:46:10,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-12-08T00:46:10,335 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/WALs/0f983e3e5be1,40457,1733618768954/0f983e3e5be1%2C40457%2C1733618768954.1733618770252
2024-12-08T00:46:10,336 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45419:45419),(127.0.0.1/127.0.0.1:38021:38021),(127.0.0.1/127.0.0.1:40355:40355)]
2024-12-08T00:46:10,337 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-08T00:46:10,337 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T00:46:10,340 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-08T00:46:10,340 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-08T00:46:10,372 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-08T00:46:10,392 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T00:46:10,395 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:10,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:10,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:10,401 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T00:46:10,402 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:10,403 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:46:10,403 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:10,406 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T00:46:10,406 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:10,407 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:46:10,407 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:10,409 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T00:46:10,409 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:10,410 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:46:10,410 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:10,414 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:10,415 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:10,420 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:10,421 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:10,425 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T00:46:10,429 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:10,435 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:46:10,436 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61256963, jitterRate=-0.0872001200914383}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:46:10,441 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733618770351Initializing all the Stores at 1733618770353 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618770353Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618770354 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618770354Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618770354Cleaning up temporary data from old regions at 1733618770421 (+67 ms)Region opened successfully at 1733618770441 (+20 ms) 2024-12-08T00:46:10,442 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T00:46:10,471 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d10db43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:46:10,496 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T00:46:10,506 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T00:46:10,506 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T00:46:10,508 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T00:46:10,510 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-08T00:46:10,515 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-08T00:46:10,515 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T00:46:10,536 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T00:46:10,543 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T00:46:10,589 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T00:46:10,594 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T00:46:10,597 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T00:46:10,605 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T00:46:10,607 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T00:46:10,612 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T00:46:10,622 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T00:46:10,623 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T00:46:10,635 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T00:46:10,657 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T00:46:10,668 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T00:46:10,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:10,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:10,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:10,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:10,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:10,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:10,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:10,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:10,685 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0f983e3e5be1,40457,1733618768954, sessionid=0x10002f0d7d50000, setting cluster-up flag (Was=false) 2024-12-08T00:46:10,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:10,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:10,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-08T00:46:10,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:10,739 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T00:46:10,743 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,40457,1733618768954 2024-12-08T00:46:10,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:10,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:10,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:10,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:10,788 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T00:46:10,791 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,40457,1733618768954 2024-12-08T00:46:10,800 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T00:46:10,862 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T00:46:10,869 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(746): ClusterId : 2d1b2c2a-5c77-43b0-aa0b-b5232f4fe46d 2024-12-08T00:46:10,869 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(746): ClusterId : 2d1b2c2a-5c77-43b0-aa0b-b5232f4fe46d 2024-12-08T00:46:10,869 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.HRegionServer(746): ClusterId : 2d1b2c2a-5c77-43b0-aa0b-b5232f4fe46d 2024-12-08T00:46:10,870 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T00:46:10,872 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:46:10,872 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:46:10,872 DEBUG [RS:0;0f983e3e5be1:44379 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:46:10,876 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T00:46:10,881 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0f983e3e5be1,40457,1733618768954 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T00:46:10,907 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:46:10,907 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:46:10,907 DEBUG [RS:0;0f983e3e5be1:44379 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:46:10,907 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:46:10,907 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:46:10,907 DEBUG [RS:0;0f983e3e5be1:44379 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:46:10,908 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:46:10,909 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:46:10,909 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:46:10,909 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:46:10,909 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0f983e3e5be1:0, corePoolSize=10, maxPoolSize=10 2024-12-08T00:46:10,910 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:10,910 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 
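The StochasticLoadBalancer(272) line above dumps the knobs the balancer was loaded with (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, plus the list of cost functions), and BaseLoadBalancer(416) reports slop=0.2. In a deployment these come from hbase-site.xml; the sketch below sets them programmatically on a Configuration. The property names are quoted from memory and should be treated as assumptions to verify against the build in use (4.0.0-alpha-1-SNAPSHOT here).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Values mirror the ones reported in the log above; key names are assumed.
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            // "slop=0.2" from the BaseLoadBalancer line corresponds to this key.
            conf.setFloat("hbase.regions.slop", 0.2f);
            System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
        }
    }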
2024-12-08T00:46:10,910 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:10,915 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:46:10,916 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T00:46:10,918 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733618800918 2024-12-08T00:46:10,920 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T00:46:10,921 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T00:46:10,924 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T00:46:10,924 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:10,924 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T00:46:10,924 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T00:46:10,924 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T00:46:10,924 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T00:46:10,925 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:10,929 DEBUG [RS:0;0f983e3e5be1:44379 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:46:10,929 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:46:10,929 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:46:10,929 DEBUG [RS:0;0f983e3e5be1:44379 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e43231f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:46:10,929 DEBUG [RS:1;0f983e3e5be1:33233 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55a6d847, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:46:10,929 DEBUG [RS:2;0f983e3e5be1:38841 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bcddf74, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:46:10,931 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T00:46:10,933 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T00:46:10,933 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T00:46:10,935 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T00:46:10,936 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T00:46:10,940 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618770937,5,FailOnTimeoutGroup] 2024-12-08T00:46:10,940 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:10,940 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:10,941 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618770940,5,FailOnTimeoutGroup] 2024-12-08T00:46:10,941 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:10,941 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T00:46:10,943 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:10,943 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:10,946 DEBUG [RS:0;0f983e3e5be1:44379 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0f983e3e5be1:44379 2024-12-08T00:46:10,947 DEBUG [RS:2;0f983e3e5be1:38841 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;0f983e3e5be1:38841 2024-12-08T00:46:10,949 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:46:10,949 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:46:10,949 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:46:10,949 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:46:10,949 DEBUG [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T00:46:10,949 DEBUG [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T00:46:10,950 DEBUG [RS:1;0f983e3e5be1:33233 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;0f983e3e5be1:33233 2024-12-08T00:46:10,950 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:46:10,951 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:46:10,951 DEBUG [RS:1;0f983e3e5be1:33233 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T00:46:10,951 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636387061_22 at /127.0.0.1:46556 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:34549:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46556 dst: /127.0.0.1:34549 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:46:10,952 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,40457,1733618768954 with port=38841, startcode=1733618769837 2024-12-08T00:46:10,952 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,40457,1733618768954 with port=44379, startcode=1733618769693 2024-12-08T00:46:10,952 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,40457,1733618768954 with port=33233, startcode=1733618769793 2024-12-08T00:46:10,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-08T00:46:10,963 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T00:46:10,964 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T00:46:10,965 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a 2024-12-08T00:46:10,965 DEBUG [RS:1;0f983e3e5be1:33233 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:46:10,965 DEBUG [RS:2;0f983e3e5be1:38841 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:46:10,969 DEBUG [RS:0;0f983e3e5be1:44379 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:46:10,977 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:10,978 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
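The PEWorker-1 entries above (FSTableDescriptors and the HRegion "creating 1588230740" line) print the full hbase:meta table descriptor: ROWCOL bloom filters, ROW_INDEX_V1 data block encoding, in-memory column families, and an 8 KB block size for 'info'/'ns'/'table'. Purely as an illustration of what those attributes map to in the public client API (this is not how the master bootstraps meta, and the table name below is a placeholder), an equivalent column family could be described like this:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptor {
        public static void main(String[] args) {
            // Mirrors the 'info' family attributes printed by FSTableDescriptors above.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setInMemory(true)
                    .setBlocksize(8 * 1024)
                    .build();
            // "example" is a placeholder table name, not hbase:meta itself.
            TableDescriptor table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("example"))
                    .setColumnFamily(info)
                    .build();
            System.out.println(table);
        }
    }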
2024-12-08T00:46:10,981 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636387061_22 at /127.0.0.1:46582 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:34549:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46582 dst: /127.0.0.1:34549 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:46:10,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-08T00:46:10,992 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
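The repeated "Cannot allocate parity block (policy=RS-3-2-1024k)" warnings, the DataXceiver "Premature EOF" errors, and the "Block group <1> failed to write 2 blocks" messages all point at the same condition: an RS-3-2-1024k striped write needs 3 data + 2 parity = 5 datanodes, and this mini-cluster evidently has fewer, so the striped output stream cannot place the parity blocks. The log itself recommends running 'hdfs ec -verifyClusterSetup'. A sketch of the same check from client code is below; the NameNode URI is taken from the log, while the inspected path is an example.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcCapacityCheck {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // NameNode address as reported in the log; the checked path is an example.
            DistributedFileSystem fs = (DistributedFileSystem)
                    FileSystem.get(URI.create("hdfs://localhost:46415"), conf);
            Path dir = new Path("/user/jenkins/test-data");
            ErasureCodingPolicy policy = fs.getErasureCodingPolicy(dir);
            int liveDataNodes = fs.getDataNodeStats().length;
            if (policy != null) {
                int required = policy.getNumDataUnits() + policy.getNumParityUnits();
                System.out.printf("policy=%s needs %d datanodes, cluster has %d%n",
                        policy.getName(), required, liveDataNodes);
                // RS-3-2-1024k needs 5 datanodes; with fewer, parity blocks cannot be
                // allocated, matching the DFSStripedOutputStream warnings above.
            } else {
                System.out.println("No erasure coding policy set on " + dir);
            }
            fs.close();
        }
    }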
2024-12-08T00:46:10,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:10,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-08T00:46:10,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:46:10,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-08T00:46:11,000 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:46:11,000 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:11,001 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:11,002 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52783, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:46:11,002 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41467, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:46:11,002 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:46:11,002 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41775, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:46:11,004 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:46:11,004 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:11,005 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:11,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:46:11,008 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40457 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,33233,1733618769793 2024-12-08T00:46:11,008 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:46:11,008 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:11,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:11,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:46:11,010 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40457 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,33233,1733618769793 2024-12-08T00:46:11,012 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:46:11,012 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:11,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:11,014 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:46:11,015 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740 2024-12-08T00:46:11,016 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740 2024-12-08T00:46:11,018 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:46:11,018 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:46:11,019 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
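The CompactionConfiguration(183) lines above repeat the same per-family settings for info/ns/rep_barrier/table: minCompactSize 128 MB, ratio 1.2, minFilesToCompact 3, maxFilesToCompact 10, throttle point 2684354560 (2.5 GB), ExploringCompactionPolicy. These correspond to the standard hbase.hstore.compaction.* settings; a minimal sketch follows, with the caveat that the key names are the commonly documented ones and should be checked against this snapshot build. The last key is quoted verbatim from the FlushLargeStoresPolicy line above, which reports it unset and falling back to flush size divided by the number of families (32 MB).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Values echo the CompactionConfiguration(183) lines in the log above.
            conf.setInt("hbase.hstore.compaction.min", 3);         // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);        // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // ratio
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
            // Key name taken from the FlushLargeStoresPolicy(65) message; 32 MB is the
            // fallback the log reports when it is unset.
            conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 32L * 1024 * 1024);
            System.out.println("compaction ratio = " + conf.get("hbase.hstore.compaction.ratio"));
        }
    }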
2024-12-08T00:46:11,021 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:46:11,022 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40457 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,38841,1733618769837 2024-12-08T00:46:11,022 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40457 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,38841,1733618769837 2024-12-08T00:46:11,026 DEBUG [RS:1;0f983e3e5be1:33233 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a 2024-12-08T00:46:11,026 DEBUG [RS:1;0f983e3e5be1:33233 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46415 2024-12-08T00:46:11,026 DEBUG [RS:1;0f983e3e5be1:33233 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:46:11,027 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40457 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,44379,1733618769693 2024-12-08T00:46:11,028 DEBUG [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a 2024-12-08T00:46:11,028 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40457 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,44379,1733618769693 2024-12-08T00:46:11,028 DEBUG [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46415 2024-12-08T00:46:11,028 DEBUG [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:46:11,028 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:46:11,031 DEBUG [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a 2024-12-08T00:46:11,031 DEBUG [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46415 2024-12-08T00:46:11,031 DEBUG [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:46:11,031 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67777450, jitterRate=0.009962707757949829}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:46:11,033 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733618770993Initializing all the Stores at 1733618770996 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 
1733618770996Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618770997 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618770997Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618770997Cleaning up temporary data from old regions at 1733618771018 (+21 ms)Region opened successfully at 1733618771033 (+15 ms) 2024-12-08T00:46:11,033 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:46:11,033 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:46:11,034 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:46:11,034 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:46:11,034 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:46:11,035 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:46:11,035 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733618771033Disabling compacts and flushes for region at 1733618771033Disabling writes for close at 1733618771034 (+1 ms)Writing region close event to WAL at 1733618771034Closed at 1733618771035 (+1 ms) 2024-12-08T00:46:11,038 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:46:11,038 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T00:46:11,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T00:46:11,050 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:46:11,053 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T00:46:11,063 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:46:11,092 DEBUG [RS:1;0f983e3e5be1:33233 {}] zookeeper.ZKUtil(111): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,33233,1733618769793 2024-12-08T00:46:11,092 WARN [RS:1;0f983e3e5be1:33233 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:46:11,092 INFO [RS:1;0f983e3e5be1:33233 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T00:46:11,093 DEBUG [RS:2;0f983e3e5be1:38841 {}] zookeeper.ZKUtil(111): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,38841,1733618769837 2024-12-08T00:46:11,093 DEBUG [RS:0;0f983e3e5be1:44379 {}] zookeeper.ZKUtil(111): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,44379,1733618769693 2024-12-08T00:46:11,093 WARN [RS:2;0f983e3e5be1:38841 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:46:11,093 WARN [RS:0;0f983e3e5be1:44379 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:46:11,093 DEBUG [RS:1;0f983e3e5be1:33233 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,33233,1733618769793 2024-12-08T00:46:11,093 INFO [RS:2;0f983e3e5be1:38841 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T00:46:11,093 INFO [RS:0;0f983e3e5be1:44379 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T00:46:11,093 DEBUG [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,38841,1733618769837 2024-12-08T00:46:11,094 DEBUG [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,44379,1733618769693 2024-12-08T00:46:11,095 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,38841,1733618769837] 2024-12-08T00:46:11,095 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,33233,1733618769793] 2024-12-08T00:46:11,096 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,44379,1733618769693] 2024-12-08T00:46:11,118 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:46:11,118 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:46:11,118 INFO 
[RS:1;0f983e3e5be1:33233 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:46:11,132 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:46:11,132 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:46:11,132 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:46:11,136 INFO [RS:2;0f983e3e5be1:38841 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:46:11,137 INFO [RS:0;0f983e3e5be1:44379 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:46:11,137 INFO [RS:1;0f983e3e5be1:33233 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:46:11,137 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,137 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,137 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,138 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:46:11,138 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:46:11,138 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:46:11,143 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:46:11,143 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:46:11,143 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:46:11,145 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,145 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,145 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
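The MemStoreFlusher(131) lines report globalMemStoreLimit=880 M with a low-water mark of 836 M (836/880 is roughly 0.95 of the global limit), and PressureAwareCompactionThroughputController(131) caps compaction I/O between 50 and 100 MB/s. The configuration keys below are the commonly documented ones for these limits; treat the exact names as assumptions to verify against this snapshot build.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndThroughputTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Fraction of the JVM heap shared by all memstores; the log reports an
            // absolute limit of 880 M for this JVM.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            // Low-water mark as a fraction of the global limit (836/880 ~= 0.95).
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            // Bounds reported by PressureAwareCompactionThroughputController above.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
        }
    }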
2024-12-08T00:46:11,145 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,145 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,145 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,145 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,145 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,145 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,145 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:46:11,146 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 
2024-12-08T00:46:11,146 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:46:11,146 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,146 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,147 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:11,147 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,147 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,147 DEBUG [RS:2;0f983e3e5be1:38841 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:11,147 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,147 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): 
Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,147 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,147 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:11,147 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:11,147 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:11,147 DEBUG [RS:0;0f983e3e5be1:44379 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:11,147 DEBUG [RS:1;0f983e3e5be1:33233 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:11,151 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,151 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,151 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,151 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,151 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,151 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,151 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,38841,1733618769837-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:46:11,151 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,151 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,151 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,151 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,152 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
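The executor.ExecutorService(95) lines above show each region server creating a small fixed-size pool per event type (RS_OPEN_REGION with corePoolSize=1/maxPoolSize=1, RS_LOG_REPLAY_OPS with 2, RS_SNAPSHOT_OPERATIONS and RS_FLUSH_OPERATIONS with 3, and so on). As a plain java.util.concurrent analogy of that "core = max" shape, rather than HBase's internal ExecutorService class:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class FixedEventPool {
        public static void main(String[] args) {
            // Analogy for the "corePoolSize=N, maxPoolSize=N" executors in the log:
            // a pool bounded by its thread count with an unbounded work queue.
            ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
                    1, 1,                 // corePoolSize = maxPoolSize = 1 (RS_OPEN_REGION)
                    60L, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<>());
            openRegionPool.submit(() -> System.out.println("open region event handled"));
            openRegionPool.shutdown();
        }
    }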
2024-12-08T00:46:11,152 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,152 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,152 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,152 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,152 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,33233,1733618769793-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:46:11,152 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,44379,1733618769693-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:46:11,172 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:46:11,172 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:46:11,172 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:46:11,174 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,38841,1733618769837-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,174 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,33233,1733618769793-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,174 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,44379,1733618769693-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,175 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,175 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,175 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,175 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.Replication(171): 0f983e3e5be1,38841,1733618769837 started 2024-12-08T00:46:11,175 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.Replication(171): 0f983e3e5be1,33233,1733618769793 started 2024-12-08T00:46:11,175 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.Replication(171): 0f983e3e5be1,44379,1733618769693 started 2024-12-08T00:46:11,191 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,191 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T00:46:11,191 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,192 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,33233,1733618769793, RpcServer on 0f983e3e5be1/172.17.0.2:33233, sessionid=0x10002f0d7d50002 2024-12-08T00:46:11,192 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,44379,1733618769693, RpcServer on 0f983e3e5be1/172.17.0.2:44379, sessionid=0x10002f0d7d50001 2024-12-08T00:46:11,192 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,38841,1733618769837, RpcServer on 0f983e3e5be1/172.17.0.2:38841, sessionid=0x10002f0d7d50003 2024-12-08T00:46:11,192 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:46:11,192 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:46:11,192 DEBUG [RS:0;0f983e3e5be1:44379 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:46:11,192 DEBUG [RS:2;0f983e3e5be1:38841 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,38841,1733618769837 2024-12-08T00:46:11,192 DEBUG [RS:1;0f983e3e5be1:33233 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,33233,1733618769793 2024-12-08T00:46:11,193 DEBUG [RS:0;0f983e3e5be1:44379 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,44379,1733618769693 2024-12-08T00:46:11,193 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,38841,1733618769837' 2024-12-08T00:46:11,193 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,33233,1733618769793' 2024-12-08T00:46:11,193 DEBUG [RS:0;0f983e3e5be1:44379 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,44379,1733618769693' 2024-12-08T00:46:11,193 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:46:11,193 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:46:11,193 DEBUG [RS:0;0f983e3e5be1:44379 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:46:11,194 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:46:11,194 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:46:11,194 DEBUG [RS:0;0f983e3e5be1:44379 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:46:11,194 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:46:11,194 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.RegionServerProcedureManagerHost(53): Procedure 
flush-table-proc started 2024-12-08T00:46:11,194 DEBUG [RS:0;0f983e3e5be1:44379 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:46:11,194 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:46:11,194 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:46:11,194 DEBUG [RS:0;0f983e3e5be1:44379 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:46:11,194 DEBUG [RS:2;0f983e3e5be1:38841 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0f983e3e5be1,38841,1733618769837 2024-12-08T00:46:11,194 DEBUG [RS:0;0f983e3e5be1:44379 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0f983e3e5be1,44379,1733618769693 2024-12-08T00:46:11,194 DEBUG [RS:1;0f983e3e5be1:33233 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0f983e3e5be1,33233,1733618769793 2024-12-08T00:46:11,194 DEBUG [RS:0;0f983e3e5be1:44379 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,44379,1733618769693' 2024-12-08T00:46:11,194 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,38841,1733618769837' 2024-12-08T00:46:11,194 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,33233,1733618769793' 2024-12-08T00:46:11,194 DEBUG [RS:0;0f983e3e5be1:44379 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:46:11,194 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:46:11,194 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:46:11,195 DEBUG [RS:0;0f983e3e5be1:44379 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:46:11,195 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:46:11,195 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:46:11,195 DEBUG [RS:0;0f983e3e5be1:44379 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:46:11,195 DEBUG [RS:2;0f983e3e5be1:38841 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:46:11,195 DEBUG [RS:1;0f983e3e5be1:33233 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:46:11,195 INFO [RS:2;0f983e3e5be1:38841 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:46:11,195 INFO [RS:0;0f983e3e5be1:44379 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:46:11,196 INFO [RS:1;0f983e3e5be1:33233 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:46:11,196 INFO [RS:2;0f983e3e5be1:38841 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
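Each region server above registers as a procedure member and then watches the /hbase/flush-table-proc/acquired and /hbase/online-snapshot/acquired znodes for new procedures posted by the master. A rough sketch of that watch pattern with the plain Apache ZooKeeper client, pointed at the quorum shown later in this log (127.0.0.1:55931); the callback body is illustrative and not the HBase member implementation:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ProcedureMemberSketch {
        public static void main(String[] args) throws Exception {
            // Connect to the same quorum the servers in this log use.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:55931", 30000, event -> { });

            Watcher acquiredWatcher = new Watcher() {
                @Override
                public void process(WatchedEvent event) {
                    // NodeChildrenChanged fires when the master posts a new procedure;
                    // a real member would re-read the children and start the barrier work here.
                    System.out.println("children changed under " + event.getPath());
                }
            };

            // Look for (and keep watching) new procedures under the znodes named in the log.
            // These znodes exist in this cluster, as the entries above show.
            System.out.println(zk.getChildren("/hbase/flush-table-proc/acquired", acquiredWatcher));
            System.out.println(zk.getChildren("/hbase/online-snapshot/acquired", acquiredWatcher));
        }
    }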
2024-12-08T00:46:11,196 INFO [RS:0;0f983e3e5be1:44379 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T00:46:11,196 INFO [RS:1;0f983e3e5be1:33233 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T00:46:11,204 WARN [0f983e3e5be1:40457 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T00:46:11,302 INFO [RS:2;0f983e3e5be1:38841 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T00:46:11,302 INFO [RS:0;0f983e3e5be1:44379 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T00:46:11,302 INFO [RS:1;0f983e3e5be1:33233 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T00:46:11,306 INFO [RS:2;0f983e3e5be1:38841 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C38841%2C1733618769837, suffix=, logDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,38841,1733618769837, archiveDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/oldWALs, maxLogs=32 2024-12-08T00:46:11,306 INFO [RS:0;0f983e3e5be1:44379 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C44379%2C1733618769693, suffix=, logDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,44379,1733618769693, archiveDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/oldWALs, maxLogs=32 2024-12-08T00:46:11,306 INFO [RS:1;0f983e3e5be1:33233 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C33233%2C1733618769793, suffix=, logDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,33233,1733618769793, archiveDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/oldWALs, maxLogs=32 2024-12-08T00:46:11,324 DEBUG [RS:2;0f983e3e5be1:38841 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,38841,1733618769837/0f983e3e5be1%2C38841%2C1733618769837.1733618771311, exclude list is [], retry=0 2024-12-08T00:46:11,325 DEBUG [RS:1;0f983e3e5be1:33233 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,33233,1733618769793/0f983e3e5be1%2C33233%2C1733618769793.1733618771312, exclude list is [], retry=0 2024-12-08T00:46:11,327 DEBUG [RS:0;0f983e3e5be1:44379 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,44379,1733618769693/0f983e3e5be1%2C44379%2C1733618769693.1733618771312, exclude list is [], retry=0 2024-12-08T00:46:11,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34549,DS-db17d945-d425-4a3e-9c52-f7aeafd06e0d,DISK] 2024-12-08T00:46:11,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34549,DS-db17d945-d425-4a3e-9c52-f7aeafd06e0d,DISK] 2024-12-08T00:46:11,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40453,DS-8e40799e-57e0-4759-b491-cb8bbcd0ba02,DISK] 2024-12-08T00:46:11,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41981,DS-adecbfca-b5d6-49b5-afa5-f05e8830b783,DISK] 2024-12-08T00:46:11,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41981,DS-adecbfca-b5d6-49b5-afa5-f05e8830b783,DISK] 2024-12-08T00:46:11,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40453,DS-8e40799e-57e0-4759-b491-cb8bbcd0ba02,DISK] 2024-12-08T00:46:11,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40453,DS-8e40799e-57e0-4759-b491-cb8bbcd0ba02,DISK] 2024-12-08T00:46:11,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41981,DS-adecbfca-b5d6-49b5-afa5-f05e8830b783,DISK] 2024-12-08T00:46:11,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34549,DS-db17d945-d425-4a3e-9c52-f7aeafd06e0d,DISK] 2024-12-08T00:46:11,363 INFO [RS:2;0f983e3e5be1:38841 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,38841,1733618769837/0f983e3e5be1%2C38841%2C1733618769837.1733618771311 2024-12-08T00:46:11,363 INFO [RS:1;0f983e3e5be1:33233 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,33233,1733618769793/0f983e3e5be1%2C33233%2C1733618769793.1733618771312 2024-12-08T00:46:11,363 INFO [RS:0;0f983e3e5be1:44379 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,44379,1733618769693/0f983e3e5be1%2C44379%2C1733618769693.1733618771312 2024-12-08T00:46:11,364 DEBUG [RS:2;0f983e3e5be1:38841 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38021:38021),(127.0.0.1/127.0.0.1:45419:45419),(127.0.0.1/127.0.0.1:40355:40355)] 2024-12-08T00:46:11,364 DEBUG [RS:1;0f983e3e5be1:33233 {}] wal.AbstractFSWAL(1109): 
Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40355:40355),(127.0.0.1/127.0.0.1:45419:45419),(127.0.0.1/127.0.0.1:38021:38021)] 2024-12-08T00:46:11,364 DEBUG [RS:0;0f983e3e5be1:44379 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45419:45419),(127.0.0.1/127.0.0.1:38021:38021),(127.0.0.1/127.0.0.1:40355:40355)] 2024-12-08T00:46:11,458 DEBUG [0f983e3e5be1:40457 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-08T00:46:11,469 DEBUG [0f983e3e5be1:40457 {}] balancer.BalancerClusterState(204): Hosts are {0f983e3e5be1=0} racks are {/default-rack=0} 2024-12-08T00:46:11,475 DEBUG [0f983e3e5be1:40457 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-08T00:46:11,476 DEBUG [0f983e3e5be1:40457 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-08T00:46:11,476 DEBUG [0f983e3e5be1:40457 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-08T00:46:11,476 DEBUG [0f983e3e5be1:40457 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-08T00:46:11,476 DEBUG [0f983e3e5be1:40457 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-08T00:46:11,476 DEBUG [0f983e3e5be1:40457 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-08T00:46:11,476 INFO [0f983e3e5be1:40457 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-08T00:46:11,476 INFO [0f983e3e5be1:40457 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-08T00:46:11,476 INFO [0f983e3e5be1:40457 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-08T00:46:11,476 DEBUG [0f983e3e5be1:40457 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-08T00:46:11,483 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0f983e3e5be1,38841,1733618769837 2024-12-08T00:46:11,488 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,38841,1733618769837, state=OPENING 2024-12-08T00:46:11,564 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T00:46:11,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:11,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:11,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:11,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:11,575 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:11,575 DEBUG 
[zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:11,575 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:11,575 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:11,578 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:46:11,580 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,38841,1733618769837}] 2024-12-08T00:46:11,761 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T00:46:11,762 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53939, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T00:46:11,776 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T00:46:11,777 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T00:46:11,777 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-08T00:46:11,780 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C38841%2C1733618769837.meta, suffix=.meta, logDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,38841,1733618769837, archiveDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/oldWALs, maxLogs=32 2024-12-08T00:46:11,794 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,38841,1733618769837/0f983e3e5be1%2C38841%2C1733618769837.meta.1733618771781.meta, exclude list is [], retry=0 2024-12-08T00:46:11,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40453,DS-8e40799e-57e0-4759-b491-cb8bbcd0ba02,DISK] 2024-12-08T00:46:11,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41981,DS-adecbfca-b5d6-49b5-afa5-f05e8830b783,DISK] 2024-12-08T00:46:11,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34549,DS-db17d945-d425-4a3e-9c52-f7aeafd06e0d,DISK] 2024-12-08T00:46:11,800 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,38841,1733618769837/0f983e3e5be1%2C38841%2C1733618769837.meta.1733618771781.meta 2024-12-08T00:46:11,800 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38021:38021),(127.0.0.1/127.0.0.1:40355:40355),(127.0.0.1/127.0.0.1:45419:45419)] 2024-12-08T00:46:11,801 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:46:11,802 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T00:46:11,804 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T00:46:11,808 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
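The WAL entries above make the file-naming convention visible: the logDir is WALs/<server name>, the prefix is the server name with commas percent-encoded, and the creation timestamp plus the suffix (.meta for the meta WAL, empty otherwise) is appended. A small sketch that rebuilds the two paths seen in this log from those pieces; the helper is illustrative only and not an HBase API:

    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;

    public class WalNameSketch {
        // Illustrative only: name = <prefix> + "." + <creation timestamp> + <suffix>,
        // as printed by wal.AbstractFSWAL above.
        static String walFile(String logDir, String prefix, long ts, String suffix) {
            return logDir + "/" + prefix + "." + ts + suffix;
        }

        public static void main(String[] args) {
            String logDir = "hdfs://localhost:46415/user/jenkins/test-data/"
                + "f88bb015-a4dc-a798-6f89-5983724f011a/WALs/0f983e3e5be1,38841,1733618769837";
            // The prefix is the server name with commas percent-encoded ("," -> "%2C").
            String prefix = URLEncoder.encode("0f983e3e5be1,38841,1733618769837", StandardCharsets.UTF_8);

            // .../0f983e3e5be1%2C38841%2C1733618769837.1733618771311   (data WAL, empty suffix)
            System.out.println(walFile(logDir, prefix, 1733618771311L, ""));
            // .../0f983e3e5be1%2C38841%2C1733618769837.meta.1733618771781.meta   (meta WAL)
            System.out.println(walFile(logDir, prefix + ".meta", 1733618771781L, ".meta"));
        }
    }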
2024-12-08T00:46:11,811 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T00:46:11,812 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:11,812 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T00:46:11,812 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T00:46:11,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:46:11,816 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:46:11,817 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:11,817 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:11,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:46:11,819 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:46:11,819 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:11,820 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:11,820 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:46:11,821 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:46:11,822 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:11,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:11,823 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:46:11,824 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:46:11,824 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:11,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
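The CompactionConfiguration lines above print the selection parameters for each store of hbase:meta: minFilesToCompact:3, maxFilesToCompact:10 and ratio 1.2. As a simplified illustration of the size-ratio test used by HBase's size-based selection policies (not the actual ExploringCompactionPolicy code), a candidate file stays in a selection only if its size is at most ratio times the combined size of the other candidates:

    import java.util.List;
    import java.util.stream.Collectors;

    public class RatioSelectionSketch {
        // Simplified ratio test using the parameters from the log (ratio 1.2,
        // minFilesToCompact:3, maxFilesToCompact:10): keep a file only if
        //   size(file) <= ratio * sum(sizes of the other candidates).
        static List<Long> select(List<Long> sizes, double ratio, int minFiles, int maxFiles) {
            long total = sizes.stream().mapToLong(Long::longValue).sum();
            List<Long> picked = sizes.stream()
                .filter(s -> s <= ratio * (total - s))
                .limit(maxFiles)
                .collect(Collectors.toList());
            return picked.size() >= minFiles ? picked : List.of();
        }

        public static void main(String[] args) {
            // The 100 MB file dominates the rest, so it is skipped;
            // the three small files compact together.
            System.out.println(select(List.of(100L, 12L, 10L, 8L), 1.2, 3, 10)); // [12, 10, 8]
        }
    }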
2024-12-08T00:46:11,825 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:46:11,826 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740 2024-12-08T00:46:11,829 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740 2024-12-08T00:46:11,830 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:46:11,831 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:46:11,831 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T00:46:11,833 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:46:11,835 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61402613, jitterRate=-0.08502976596355438}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:46:11,835 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T00:46:11,836 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733618771812Writing region info on filesystem at 1733618771813 (+1 ms)Initializing all the Stores at 1733618771814 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618771814Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618771815 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618771815Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618771815Cleaning up temporary data from old regions at 1733618771831 (+16 ms)Running coprocessor post-open hooks at 1733618771835 (+4 ms)Region opened successfully at 1733618771836 (+1 ms) 2024-12-08T00:46:11,842 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733618771753 2024-12-08T00:46:11,852 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T00:46:11,852 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T00:46:11,854 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0f983e3e5be1,38841,1733618769837 2024-12-08T00:46:11,856 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,38841,1733618769837, state=OPEN 2024-12-08T00:46:11,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:46:11,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:46:11,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:46:11,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:46:11,888 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:11,888 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:11,888 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:11,888 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:11,889 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0f983e3e5be1,38841,1733618769837 2024-12-08T00:46:11,898 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T00:46:11,898 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,38841,1733618769837 in 309 msec 2024-12-08T00:46:11,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T00:46:11,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 857 msec 2024-12-08T00:46:11,908 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:46:11,908 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T00:46:11,926 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:46:11,927 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,38841,1733618769837, seqNum=-1] 2024-12-08T00:46:11,946 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:46:11,948 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42237, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:46:11,967 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1370 sec 2024-12-08T00:46:11,967 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733618771967, completionTime=-1 2024-12-08T00:46:11,969 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-08T00:46:11,969 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
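The "Start fetching meta region location from registry" / "The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,38841,1733618769837, seqNum=-1]" lines above show the lookup every client performs once meta is open. A hedged sketch of the same lookup through the public HBase client API, with the ZooKeeper quorum and port taken from this log and everything else assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Assumption: point the client at the quorum used in this log (127.0.0.1:55931).
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "55931");

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
                // Resolves to something like hostname=0f983e3e5be1,38841,1733618769837 above.
                HRegionLocation loc = locator.getRegionLocation(new byte[0]);
                System.out.println(loc.getServerName());
            }
        }
    }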
2024-12-08T00:46:11,991 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-08T00:46:11,991 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733618831991 2024-12-08T00:46:11,991 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733618891991 2024-12-08T00:46:11,991 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 22 msec 2024-12-08T00:46:11,993 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-08T00:46:11,998 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40457,1733618768954-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,999 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40457,1733618768954-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:11,999 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40457,1733618768954-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:12,000 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0f983e3e5be1:40457, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:12,000 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:12,001 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:12,006 DEBUG [master/0f983e3e5be1:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T00:46:12,028 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.092sec 2024-12-08T00:46:12,030 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T00:46:12,031 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T00:46:12,032 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T00:46:12,032 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-08T00:46:12,033 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T00:46:12,033 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40457,1733618768954-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:46:12,034 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40457,1733618768954-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T00:46:12,038 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T00:46:12,039 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T00:46:12,039 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,40457,1733618768954-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:12,078 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@296587b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:46:12,082 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-08T00:46:12,082 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-08T00:46:12,084 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0f983e3e5be1,40457,-1 for getting cluster id 2024-12-08T00:46:12,086 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T00:46:12,093 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2d1b2c2a-5c77-43b0-aa0b-b5232f4fe46d' 2024-12-08T00:46:12,095 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T00:46:12,095 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2d1b2c2a-5c77-43b0-aa0b-b5232f4fe46d" 2024-12-08T00:46:12,096 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e730f95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:46:12,096 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0f983e3e5be1,40457,-1] 2024-12-08T00:46:12,098 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T00:46:12,099 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:12,101 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57566, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-08T00:46:12,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c738163, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:46:12,104 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:46:12,110 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,38841,1733618769837, seqNum=-1] 2024-12-08T00:46:12,111 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:46:12,113 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57768, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:46:12,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0f983e3e5be1,40457,1733618768954 2024-12-08T00:46:12,150 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T00:46:12,155 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 0f983e3e5be1,40457,1733618768954 2024-12-08T00:46:12,158 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@26ad8a71 2024-12-08T00:46:12,160 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:46:12,162 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57580, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:46:12,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:46:12,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-08T00:46:12,182 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:46:12,185 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-08T00:46:12,186 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:12,189 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:46:12,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:46:12,198 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:12,198 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:12,204 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636387061_22 at /127.0.0.1:46644 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:34549:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46644 dst: /127.0.0.1:34549 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:46:12,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-08T00:46:12,209 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T00:46:12,212 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 917f5ae6d5f9863aa098e1ff4c897aa6, NAME => 'TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a 2024-12-08T00:46:12,218 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:12,218 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:12,225 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636387061_22 at /127.0.0.1:46672 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:34549:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46672 dst: /127.0.0.1:34549 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:46:12,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-08T00:46:12,230 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
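The repeated warnings above ("Cannot allocate parity block(index=3, policy=RS-3-2-1024k) ... There may not be enough datanodes or racks") and the DataXceiver EOF errors are the expected symptom here: RS-3-2-1024k stripes each block group across 3 data plus 2 parity blocks, so it needs at least 5 datanodes, while this mini-cluster runs only 3. The log itself suggests `hdfs ec -verifyClusterSetup`; a rough programmatic equivalent with the HDFS client API, using the namenode address and test-data path from this log (everything else is an assumption), is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcCheckSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:46415"); // namenode from this log
            DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

            Path dir = new Path("/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a");
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
            if (policy != null) {
                // RS-3-2-1024k => 3 data + 2 parity = 5 datanodes needed per block group.
                int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
                System.out.println(policy.getName() + " needs at least " + needed + " datanodes");
            }
        }
    }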
2024-12-08T00:46:12,231 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:12,231 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 917f5ae6d5f9863aa098e1ff4c897aa6, disabling compactions & flushes 2024-12-08T00:46:12,231 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 2024-12-08T00:46:12,231 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 2024-12-08T00:46:12,231 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. after waiting 0 ms 2024-12-08T00:46:12,231 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 2024-12-08T00:46:12,231 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 2024-12-08T00:46:12,231 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 917f5ae6d5f9863aa098e1ff4c897aa6: Waiting for close lock at 1733618772231Disabling compacts and flushes for region at 1733618772231Disabling writes for close at 1733618772231Writing region close event to WAL at 1733618772231Closed at 1733618772231 2024-12-08T00:46:12,233 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:46:12,238 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733618772234"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733618772234"}]},"ts":"1733618772234"} 2024-12-08T00:46:12,243 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
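The CreateTableProcedure steps above were triggered by the client request logged earlier ("Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', ...}"). A hedged sketch of how such a request is issued through the public Admin API, not the test's actual code; the table name and the single 'cf' family come from the log, the configuration source is assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes site config on the classpath

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Mirrors the logged request: table TestHBaseWalOnEC with one 'cf' family;
                // REGION_REPLICATION => '1' is the default, everything else left at defaults.
                admin.createTable(
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                        .build());
            }
        }
    }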
2024-12-08T00:46:12,244 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:46:12,247 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733618772245"}]},"ts":"1733618772245"} 2024-12-08T00:46:12,252 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-08T00:46:12,252 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {0f983e3e5be1=0} racks are {/default-rack=0} 2024-12-08T00:46:12,254 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-08T00:46:12,254 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-08T00:46:12,254 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-08T00:46:12,254 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-08T00:46:12,254 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-08T00:46:12,254 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-08T00:46:12,254 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-08T00:46:12,254 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-08T00:46:12,254 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-08T00:46:12,254 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-08T00:46:12,255 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=917f5ae6d5f9863aa098e1ff4c897aa6, ASSIGN}] 2024-12-08T00:46:12,258 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=917f5ae6d5f9863aa098e1ff4c897aa6, ASSIGN 2024-12-08T00:46:12,260 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=917f5ae6d5f9863aa098e1ff4c897aa6, ASSIGN; state=OFFLINE, location=0f983e3e5be1,44379,1733618769693; forceNewPlan=false, retain=false 2024-12-08T00:46:12,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:46:12,414 INFO [0f983e3e5be1:40457 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
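Editor's note: after CREATE_TABLE_ASSIGN_REGIONS, the test side waits for the assignment to complete (the later "Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms" lines come from HBaseTestingUtil). A hedged sketch of that wait, assuming a shared HBaseTestingUtil instance; the field name and wrapper class are inventions for illustration:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;

public class WaitForAssignmentOutline {
  // Assumption: util is the HBaseTestingUtil that started this minicluster.
  static void waitForTable(HBaseTestingUtil util) throws Exception {
    // Blocks until every region of the table is assigned and reflected in hbase:meta,
    // matching the "All regions for table TestHBaseWalOnEC assigned" lines in this log.
    util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"), 60_000);
  }
}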
2024-12-08T00:46:12,415 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=917f5ae6d5f9863aa098e1ff4c897aa6, regionState=OPENING, regionLocation=0f983e3e5be1,44379,1733618769693 2024-12-08T00:46:12,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=917f5ae6d5f9863aa098e1ff4c897aa6, ASSIGN because future has completed 2024-12-08T00:46:12,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 917f5ae6d5f9863aa098e1ff4c897aa6, server=0f983e3e5be1,44379,1733618769693}] 2024-12-08T00:46:12,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:46:12,577 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T00:46:12,579 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39741, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T00:46:12,586 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 2024-12-08T00:46:12,587 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 917f5ae6d5f9863aa098e1ff4c897aa6, NAME => 'TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:46:12,588 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 917f5ae6d5f9863aa098e1ff4c897aa6 2024-12-08T00:46:12,588 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:12,588 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 917f5ae6d5f9863aa098e1ff4c897aa6 2024-12-08T00:46:12,588 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 917f5ae6d5f9863aa098e1ff4c897aa6 2024-12-08T00:46:12,591 INFO [StoreOpener-917f5ae6d5f9863aa098e1ff4c897aa6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 917f5ae6d5f9863aa098e1ff4c897aa6 2024-12-08T00:46:12,594 INFO [StoreOpener-917f5ae6d5f9863aa098e1ff4c897aa6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 917f5ae6d5f9863aa098e1ff4c897aa6 columnFamilyName cf 2024-12-08T00:46:12,594 DEBUG [StoreOpener-917f5ae6d5f9863aa098e1ff4c897aa6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:12,595 INFO [StoreOpener-917f5ae6d5f9863aa098e1ff4c897aa6-1 {}] regionserver.HStore(327): Store=917f5ae6d5f9863aa098e1ff4c897aa6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:46:12,595 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 917f5ae6d5f9863aa098e1ff4c897aa6 2024-12-08T00:46:12,597 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/default/TestHBaseWalOnEC/917f5ae6d5f9863aa098e1ff4c897aa6 2024-12-08T00:46:12,597 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/default/TestHBaseWalOnEC/917f5ae6d5f9863aa098e1ff4c897aa6 2024-12-08T00:46:12,598 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 917f5ae6d5f9863aa098e1ff4c897aa6 2024-12-08T00:46:12,598 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 917f5ae6d5f9863aa098e1ff4c897aa6 2024-12-08T00:46:12,601 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 917f5ae6d5f9863aa098e1ff4c897aa6 2024-12-08T00:46:12,605 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/default/TestHBaseWalOnEC/917f5ae6d5f9863aa098e1ff4c897aa6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:46:12,606 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 917f5ae6d5f9863aa098e1ff4c897aa6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63988387, jitterRate=-0.04649873077869415}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T00:46:12,606 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 917f5ae6d5f9863aa098e1ff4c897aa6 2024-12-08T00:46:12,607 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 917f5ae6d5f9863aa098e1ff4c897aa6: Running coprocessor pre-open hook at 1733618772588Writing region info on filesystem at 1733618772588Initializing all the Stores at 1733618772591 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618772591Cleaning up temporary data from old regions at 1733618772598 (+7 ms)Running coprocessor post-open hooks at 1733618772607 (+9 ms)Region opened successfully at 1733618772607 2024-12-08T00:46:12,609 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6., pid=6, masterSystemTime=1733618772576 2024-12-08T00:46:12,612 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 2024-12-08T00:46:12,612 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 2024-12-08T00:46:12,614 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=917f5ae6d5f9863aa098e1ff4c897aa6, regionState=OPEN, openSeqNum=2, regionLocation=0f983e3e5be1,44379,1733618769693 2024-12-08T00:46:12,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 917f5ae6d5f9863aa098e1ff4c897aa6, server=0f983e3e5be1,44379,1733618769693 because future has completed 2024-12-08T00:46:12,624 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T00:46:12,624 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 917f5ae6d5f9863aa098e1ff4c897aa6, server=0f983e3e5be1,44379,1733618769693 in 199 msec 2024-12-08T00:46:12,629 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T00:46:12,629 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=917f5ae6d5f9863aa098e1ff4c897aa6, ASSIGN in 369 msec 2024-12-08T00:46:12,631 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:46:12,632 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733618772632"}]},"ts":"1733618772632"} 2024-12-08T00:46:12,635 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-08T00:46:12,637 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): 
pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:46:12,640 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 463 msec 2024-12-08T00:46:12,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:46:12,820 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-08T00:46:12,820 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-08T00:46:12,822 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T00:46:12,832 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-08T00:46:12,832 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T00:46:12,833 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-08T00:46:12,842 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6., hostname=0f983e3e5be1,44379,1733618769693, seqNum=2] 2024-12-08T00:46:12,843 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:46:12,846 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53138, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:46:12,854 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-08T00:46:12,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-08T00:46:12,860 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:46:12,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T00:46:12,862 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:46:12,864 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:46:12,968 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T00:46:13,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44379 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-08T00:46:13,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 2024-12-08T00:46:13,039 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 917f5ae6d5f9863aa098e1ff4c897aa6 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-08T00:46:13,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/default/TestHBaseWalOnEC/917f5ae6d5f9863aa098e1ff4c897aa6/.tmp/cf/5ae20e71ea0b4044acbf48d230852b73 is 36, key is row/cf:cq/1733618772846/Put/seqid=0 2024-12-08T00:46:13,094 WARN [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:13,094 WARN [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:13,098 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1961346583_22 at /127.0.0.1:34126 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:40453:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34126 dst: /127.0.0.1:40453 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:46:13,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-08T00:46:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T00:46:13,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T00:46:13,504 WARN [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T00:46:13,504 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/default/TestHBaseWalOnEC/917f5ae6d5f9863aa098e1ff4c897aa6/.tmp/cf/5ae20e71ea0b4044acbf48d230852b73 2024-12-08T00:46:13,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/default/TestHBaseWalOnEC/917f5ae6d5f9863aa098e1ff4c897aa6/.tmp/cf/5ae20e71ea0b4044acbf48d230852b73 as hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/default/TestHBaseWalOnEC/917f5ae6d5f9863aa098e1ff4c897aa6/cf/5ae20e71ea0b4044acbf48d230852b73 2024-12-08T00:46:13,558 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/default/TestHBaseWalOnEC/917f5ae6d5f9863aa098e1ff4c897aa6/cf/5ae20e71ea0b4044acbf48d230852b73, entries=1, sequenceid=5, filesize=4.7 K 2024-12-08T00:46:13,565 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 917f5ae6d5f9863aa098e1ff4c897aa6 in 526ms, sequenceid=5, compaction requested=false 2024-12-08T00:46:13,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-08T00:46:13,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 917f5ae6d5f9863aa098e1ff4c897aa6: 2024-12-08T00:46:13,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 
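Editor's note: the flush executed here was requested by the client ("Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC") after a single put of row/cf:cq, which is why the memstore holds only 32 B and the resulting HFile is about 4.7 K. A hedged sketch of that client-side sequence, with names taken from the log rather than from the test source:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutAndFlush {
  static void putAndFlush(Connection conn) throws Exception {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
      // Single cell, matching "key is row/cf:cq" in the HFileWriterImpl line above.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // An Admin.flush() call like this is what produces the FlushTableProcedure
      // (pid=7) and FlushRegionProcedure (pid=8) entries in the master log.
      admin.flush(name);
    }
  }
}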
2024-12-08T00:46:13,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-08T00:46:13,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-08T00:46:13,577 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-08T00:46:13,577 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 710 msec 2024-12-08T00:46:13,581 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 723 msec 2024-12-08T00:46:13,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-08T00:46:13,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-08T00:46:13,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-08T00:46:13,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-08T00:46:13,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-08T00:46:13,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-08T00:46:13,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-08T00:46:13,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-08T00:46:13,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-08T00:46:13,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-08T00:46:13,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-08T00:46:13,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-08T00:46:13,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40457 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T00:46:13,998 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-08T00:46:14,010 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T00:46:14,010 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T00:46:14,010 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:46:14,014 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:14,014 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:14,014 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T00:46:14,014 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T00:46:14,015 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=368815740, stopped=false 2024-12-08T00:46:14,015 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0f983e3e5be1,40457,1733618768954 2024-12-08T00:46:14,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:14,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:14,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:14,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:14,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:14,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:14,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:14,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:14,072 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:46:14,074 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:14,074 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
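Editor's note: the call stacks recorded above show the shutdown being driven from TestHBaseWalOnEC.tearDown through HBaseTestingUtil.shutdownMiniCluster. In outline, a tear-down of that shape looks like the following; this is a reconstruction hedged from the stack trace, not the actual test source (the annotation and field name are assumptions):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class TearDownOutline {
  // Assumption: a shared testing-util instance; the real field in the test may differ.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Closes the cluster connection and stops the HBase minicluster plus the backing
    // mini DFS, which is what produces the "Shutting down minicluster",
    // "Stopping rpc client" and region-server STOPPING lines that follow.
    UTIL.shutdownMiniCluster();
  }
}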
2024-12-08T00:46:14,074 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:14,074 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:14,074 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:14,075 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(878): Closing user regions 2024-12-08T00:46:14,075 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:46:14,075 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:14,076 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,44379,1733618769693' ***** 2024-12-08T00:46:14,077 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:46:14,077 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,33233,1733618769793' ***** 2024-12-08T00:46:14,077 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:46:14,077 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,38841,1733618769837' ***** 2024-12-08T00:46:14,077 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:46:14,077 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:46:14,077 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:46:14,077 INFO [RS:0;0f983e3e5be1:44379 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:46:14,077 INFO [RS:1;0f983e3e5be1:33233 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:46:14,078 INFO [RS:0;0f983e3e5be1:44379 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:46:14,078 INFO [RS:1;0f983e3e5be1:33233 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:46:14,078 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,33233,1733618769793 2024-12-08T00:46:14,078 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:46:14,078 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(3091): Received CLOSE for 917f5ae6d5f9863aa098e1ff4c897aa6 2024-12-08T00:46:14,078 INFO [RS:1;0f983e3e5be1:33233 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;0f983e3e5be1:33233. 
2024-12-08T00:46:14,078 DEBUG [RS:1;0f983e3e5be1:33233 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:46:14,078 DEBUG [RS:1;0f983e3e5be1:33233 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:14,078 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:46:14,078 INFO [RS:2;0f983e3e5be1:38841 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:46:14,078 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,33233,1733618769793; all regions closed. 2024-12-08T00:46:14,078 INFO [RS:2;0f983e3e5be1:38841 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:46:14,079 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,38841,1733618769837 2024-12-08T00:46:14,079 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:46:14,079 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:46:14,079 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:46:14,079 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,44379,1733618769693 2024-12-08T00:46:14,079 INFO [RS:2;0f983e3e5be1:38841 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;0f983e3e5be1:38841. 
2024-12-08T00:46:14,079 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:46:14,079 DEBUG [RS:2;0f983e3e5be1:38841 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:46:14,079 INFO [RS:0;0f983e3e5be1:44379 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0f983e3e5be1:44379. 2024-12-08T00:46:14,079 DEBUG [RS:2;0f983e3e5be1:38841 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:14,079 DEBUG [RS:0;0f983e3e5be1:44379 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:46:14,079 DEBUG [RS:0;0f983e3e5be1:44379 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:14,079 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 917f5ae6d5f9863aa098e1ff4c897aa6, disabling compactions & flushes 2024-12-08T00:46:14,080 INFO 
[RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 2024-12-08T00:46:14,080 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:46:14,080 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 2024-12-08T00:46:14,080 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. after waiting 0 ms 2024-12-08T00:46:14,080 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 2024-12-08T00:46:14,080 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-08T00:46:14,080 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T00:46:14,080 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:46:14,080 DEBUG [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(1325): Online Regions={917f5ae6d5f9863aa098e1ff4c897aa6=TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6.} 2024-12-08T00:46:14,080 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T00:46:14,080 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T00:46:14,081 DEBUG [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(1351): Waiting on 917f5ae6d5f9863aa098e1ff4c897aa6 2024-12-08T00:46:14,083 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-08T00:46:14,083 DEBUG [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-08T00:46:14,083 DEBUG [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-08T00:46:14,083 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:46:14,083 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:46:14,083 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:46:14,083 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:46:14,083 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:46:14,084 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 
2024-12-08T00:46:14,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_1073741827_1017 (size=93) 2024-12-08T00:46:14,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_1073741827_1017 (size=93) 2024-12-08T00:46:14,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_1073741827_1017 (size=93) 2024-12-08T00:46:14,094 DEBUG [RS:1;0f983e3e5be1:33233 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/oldWALs 2024-12-08T00:46:14,094 INFO [RS:1;0f983e3e5be1:33233 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 0f983e3e5be1%2C33233%2C1733618769793:(num 1733618771312) 2024-12-08T00:46:14,094 DEBUG [RS:1;0f983e3e5be1:33233 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:14,094 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:46:14,094 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:46:14,095 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.ChoreService(370): Chore service for: regionserver/0f983e3e5be1:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T00:46:14,095 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T00:46:14,095 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:46:14,095 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T00:46:14,095 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T00:46:14,095 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:46:14,095 INFO [RS:1;0f983e3e5be1:33233 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33233 2024-12-08T00:46:14,096 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/default/TestHBaseWalOnEC/917f5ae6d5f9863aa098e1ff4c897aa6/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-08T00:46:14,098 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 
2024-12-08T00:46:14,098 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 917f5ae6d5f9863aa098e1ff4c897aa6: Waiting for close lock at 1733618774079Running coprocessor pre-close hooks at 1733618774079Disabling compacts and flushes for region at 1733618774079Disabling writes for close at 1733618774080 (+1 ms)Writing region close event to WAL at 1733618774083 (+3 ms)Running coprocessor post-close hooks at 1733618774097 (+14 ms)Closed at 1733618774098 (+1 ms) 2024-12-08T00:46:14,098 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6. 2024-12-08T00:46:14,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,33233,1733618769793 2024-12-08T00:46:14,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:46:14,109 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:46:14,110 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,33233,1733618769793] 2024-12-08T00:46:14,114 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/.tmp/info/adfe482c4bfc454198cbde21bc0ed470 is 153, key is TestHBaseWalOnEC,,1733618772163.917f5ae6d5f9863aa098e1ff4c897aa6./info:regioninfo/1733618772613/Put/seqid=0 2024-12-08T00:46:14,117 WARN [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:14,117 WARN [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:14,121 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-150962642_22 at /127.0.0.1:39028 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:41981:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39028 dst: /127.0.0.1:41981 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:46:14,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-08T00:46:14,125 WARN [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T00:46:14,125 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/.tmp/info/adfe482c4bfc454198cbde21bc0ed470 2024-12-08T00:46:14,129 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,33233,1733618769793 already deleted, retry=false 2024-12-08T00:46:14,129 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,33233,1733618769793 expired; onlineServers=2 2024-12-08T00:46:14,150 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/.tmp/ns/208d6a3f15f040e98f432163e2d522dc is 43, key is default/ns:d/1733618771952/Put/seqid=0 2024-12-08T00:46:14,151 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T00:46:14,151 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T00:46:14,152 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T00:46:14,152 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T00:46:14,152 WARN [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-08T00:46:14,152 WARN [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:14,155 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-150962642_22 at /127.0.0.1:46740 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:34549:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46740 dst: /127.0.0.1:34549 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:46:14,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-08T00:46:14,159 INFO [regionserver/0f983e3e5be1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:46:14,159 INFO [regionserver/0f983e3e5be1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:46:14,159 INFO [regionserver/0f983e3e5be1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:46:14,159 WARN [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T00:46:14,159 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/.tmp/ns/208d6a3f15f040e98f432163e2d522dc 2024-12-08T00:46:14,183 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/.tmp/table/3c5e422a0b8947c0b0e80bd8262ab956 is 52, key is TestHBaseWalOnEC/table:state/1733618772632/Put/seqid=0 2024-12-08T00:46:14,185 WARN [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:14,185 WARN [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:14,188 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-150962642_22 at /127.0.0.1:39046 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:41981:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39046 dst: /127.0.0.1:41981 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:46:14,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-08T00:46:14,193 WARN [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T00:46:14,193 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/.tmp/table/3c5e422a0b8947c0b0e80bd8262ab956 2024-12-08T00:46:14,204 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/.tmp/info/adfe482c4bfc454198cbde21bc0ed470 as hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/info/adfe482c4bfc454198cbde21bc0ed470 2024-12-08T00:46:14,213 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/info/adfe482c4bfc454198cbde21bc0ed470, entries=10, sequenceid=11, filesize=6.5 K 2024-12-08T00:46:14,215 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/.tmp/ns/208d6a3f15f040e98f432163e2d522dc as hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/ns/208d6a3f15f040e98f432163e2d522dc 2024-12-08T00:46:14,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:14,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33233-0x10002f0d7d50002, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:14,222 INFO [RS:1;0f983e3e5be1:33233 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:46:14,222 INFO [RS:1;0f983e3e5be1:33233 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,33233,1733618769793; zookeeper connection closed.
2024-12-08T00:46:14,222 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6dbdb4f7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6dbdb4f7 2024-12-08T00:46:14,224 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/ns/208d6a3f15f040e98f432163e2d522dc, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T00:46:14,226 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/.tmp/table/3c5e422a0b8947c0b0e80bd8262ab956 as hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/table/3c5e422a0b8947c0b0e80bd8262ab956 2024-12-08T00:46:14,235 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/table/3c5e422a0b8947c0b0e80bd8262ab956, entries=2, sequenceid=11, filesize=5.1 K 2024-12-08T00:46:14,236 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 153ms, sequenceid=11, compaction requested=false 2024-12-08T00:46:14,237 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T00:46:14,248 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T00:46:14,249 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:46:14,249 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:46:14,249 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733618774083Running coprocessor pre-close hooks at 1733618774083Disabling compacts and flushes for region at 1733618774083Disabling writes for close at 1733618774083Obtaining lock to block concurrent updates at 1733618774084 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733618774084Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733618774085 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733618774087 (+2 ms)Flushing 1588230740/info: creating writer at 1733618774087Flushing 1588230740/info: appending metadata at 1733618774112 (+25 ms)Flushing 1588230740/info: closing flushed file at 1733618774112Flushing 1588230740/ns: creating writer at 1733618774135 (+23 ms)Flushing 1588230740/ns: appending metadata at 1733618774149 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733618774149Flushing 1588230740/table: creating writer at 1733618774167 (+18 ms)Flushing 1588230740/table: appending metadata at 1733618774182 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733618774182Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34d46a4d: reopening flushed file at 1733618774203 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@285831ac: reopening flushed file at 1733618774213 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27c43c00: reopening flushed file at 1733618774225 (+12 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 153ms, sequenceid=11, compaction requested=false at 1733618774236 (+11 ms)Writing region close event to WAL at 1733618774238 (+2 ms)Running coprocessor post-close hooks at 1733618774249 (+11 ms)Closed at 1733618774249 2024-12-08T00:46:14,250 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T00:46:14,281 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,44379,1733618769693; all regions closed. 2024-12-08T00:46:14,283 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,38841,1733618769837; all regions closed. 2024-12-08T00:46:14,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_1073741828_1018 (size=1298) 2024-12-08T00:46:14,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_1073741828_1018 (size=1298) 2024-12-08T00:46:14,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_1073741828_1018 (size=1298) 2024-12-08T00:46:14,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_1073741829_1019 (size=2751) 2024-12-08T00:46:14,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_1073741829_1019 (size=2751) 2024-12-08T00:46:14,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_1073741829_1019 (size=2751) 2024-12-08T00:46:14,291 DEBUG [RS:0;0f983e3e5be1:44379 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/oldWALs 2024-12-08T00:46:14,291 INFO [RS:0;0f983e3e5be1:44379 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 0f983e3e5be1%2C44379%2C1733618769693:(num 1733618771312) 2024-12-08T00:46:14,291 DEBUG [RS:0;0f983e3e5be1:44379 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:14,291 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:46:14,291 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:46:14,291 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.ChoreService(370): Chore service for: regionserver/0f983e3e5be1:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T00:46:14,291 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T00:46:14,291 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T00:46:14,291 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:46:14,291 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T00:46:14,291 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:46:14,292 INFO [RS:0;0f983e3e5be1:44379 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44379 2024-12-08T00:46:14,292 DEBUG [RS:2;0f983e3e5be1:38841 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/oldWALs 2024-12-08T00:46:14,292 INFO [RS:2;0f983e3e5be1:38841 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 0f983e3e5be1%2C38841%2C1733618769837.meta:.meta(num 1733618771781) 2024-12-08T00:46:14,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_1073741826_1016 (size=93) 2024-12-08T00:46:14,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_1073741826_1016 (size=93) 2024-12-08T00:46:14,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_1073741826_1016 (size=93) 2024-12-08T00:46:14,298 DEBUG [RS:2;0f983e3e5be1:38841 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/oldWALs 2024-12-08T00:46:14,298 INFO [RS:2;0f983e3e5be1:38841 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 0f983e3e5be1%2C38841%2C1733618769837:(num 1733618771311) 2024-12-08T00:46:14,298 DEBUG [RS:2;0f983e3e5be1:38841 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:14,298 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:46:14,298 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:46:14,298 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.ChoreService(370): Chore service for: regionserver/0f983e3e5be1:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T00:46:14,298 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:46:14,298 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-08T00:46:14,298 INFO [RS:2;0f983e3e5be1:38841 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38841 2024-12-08T00:46:14,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,44379,1733618769693 2024-12-08T00:46:14,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:46:14,318 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:46:14,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,38841,1733618769837 2024-12-08T00:46:14,326 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:46:14,335 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,38841,1733618769837] 2024-12-08T00:46:14,351 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,38841,1733618769837 already deleted, retry=false 2024-12-08T00:46:14,351 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,38841,1733618769837 expired; onlineServers=1 2024-12-08T00:46:14,351 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,44379,1733618769693] 2024-12-08T00:46:14,362 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,44379,1733618769693 already deleted, retry=false 2024-12-08T00:46:14,363 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,44379,1733618769693 expired; onlineServers=0 2024-12-08T00:46:14,363 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0f983e3e5be1,40457,1733618768954' ***** 2024-12-08T00:46:14,363 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T00:46:14,363 INFO [M:0;0f983e3e5be1:40457 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:46:14,364 INFO [M:0;0f983e3e5be1:40457 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:46:14,364 DEBUG [M:0;0f983e3e5be1:40457 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T00:46:14,364 DEBUG [M:0;0f983e3e5be1:40457 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T00:46:14,364 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T00:46:14,364 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618770940 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618770940,5,FailOnTimeoutGroup] 2024-12-08T00:46:14,364 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618770937 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618770937,5,FailOnTimeoutGroup] 2024-12-08T00:46:14,365 INFO [M:0;0f983e3e5be1:40457 {}] hbase.ChoreService(370): Chore service for: master/0f983e3e5be1:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T00:46:14,365 INFO [M:0;0f983e3e5be1:40457 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:46:14,365 DEBUG [M:0;0f983e3e5be1:40457 {}] master.HMaster(1795): Stopping service threads 2024-12-08T00:46:14,365 INFO [M:0;0f983e3e5be1:40457 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T00:46:14,366 INFO [M:0;0f983e3e5be1:40457 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:46:14,367 INFO [M:0;0f983e3e5be1:40457 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T00:46:14,367 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T00:46:14,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T00:46:14,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:14,376 DEBUG [M:0;0f983e3e5be1:40457 {}] zookeeper.ZKUtil(347): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T00:46:14,376 WARN [M:0;0f983e3e5be1:40457 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T00:46:14,377 INFO [M:0;0f983e3e5be1:40457 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/.lastflushedseqids 2024-12-08T00:46:14,385 WARN [M:0;0f983e3e5be1:40457 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:14,385 WARN [M:0;0f983e3e5be1:40457 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-08T00:46:14,388 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636387061_22 at /127.0.0.1:39064 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:41981:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39064 dst: /127.0.0.1:41981 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:46:14,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-08T00:46:14,392 WARN [M:0;0f983e3e5be1:40457 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T00:46:14,392 INFO [M:0;0f983e3e5be1:40457 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T00:46:14,392 INFO [M:0;0f983e3e5be1:40457 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T00:46:14,392 DEBUG [M:0;0f983e3e5be1:40457 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:46:14,392 INFO [M:0;0f983e3e5be1:40457 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:46:14,392 DEBUG [M:0;0f983e3e5be1:40457 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:46:14,392 DEBUG [M:0;0f983e3e5be1:40457 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:46:14,392 DEBUG [M:0;0f983e3e5be1:40457 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T00:46:14,393 INFO [M:0;0f983e3e5be1:40457 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-08T00:46:14,411 DEBUG [M:0;0f983e3e5be1:40457 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3608a8b2b74344e3877da3f7d7fc5739 is 82, key is hbase:meta,,1/info:regioninfo/1733618771853/Put/seqid=0 2024-12-08T00:46:14,413 WARN [M:0;0f983e3e5be1:40457 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:14,413 WARN [M:0;0f983e3e5be1:40457 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:14,416 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636387061_22 at /127.0.0.1:39074 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:41981:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39074 dst: /127.0.0.1:41981 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:46:14,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-08T00:46:14,420 WARN [M:0;0f983e3e5be1:40457 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T00:46:14,420 INFO [M:0;0f983e3e5be1:40457 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3608a8b2b74344e3877da3f7d7fc5739 2024-12-08T00:46:14,435 INFO [RS:0;0f983e3e5be1:44379 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:46:14,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:14,435 INFO [RS:0;0f983e3e5be1:44379 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,44379,1733618769693; zookeeper connection closed. 2024-12-08T00:46:14,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44379-0x10002f0d7d50001, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:14,435 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@78c99d60 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@78c99d60 2024-12-08T00:46:14,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:14,443 INFO [RS:2;0f983e3e5be1:38841 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:46:14,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38841-0x10002f0d7d50003, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:14,443 INFO [RS:2;0f983e3e5be1:38841 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,38841,1733618769837; zookeeper connection closed. 2024-12-08T00:46:14,443 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@61836ffc {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@61836ffc 2024-12-08T00:46:14,443 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-08T00:46:14,445 DEBUG [M:0;0f983e3e5be1:40457 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/928980fc27874138a5b5940bcbd0cfe1 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733618772639/Put/seqid=0 2024-12-08T00:46:14,447 WARN [M:0;0f983e3e5be1:40457 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:14,447 WARN [M:0;0f983e3e5be1:40457 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-08T00:46:14,450 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636387061_22 at /127.0.0.1:34190 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:40453:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34190 dst: /127.0.0.1:40453 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:46:14,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_-9223372036854775552_1037 (size=6440) 2024-12-08T00:46:14,455 WARN [M:0;0f983e3e5be1:40457 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-08T00:46:14,455 INFO [M:0;0f983e3e5be1:40457 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/928980fc27874138a5b5940bcbd0cfe1 2024-12-08T00:46:14,477 DEBUG [M:0;0f983e3e5be1:40457 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2b8dc806459f41a3845efe0b1b4c482f is 69, key is 0f983e3e5be1,33233,1733618769793/rs:state/1733618771012/Put/seqid=0 2024-12-08T00:46:14,479 WARN [M:0;0f983e3e5be1:40457 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-08T00:46:14,479 WARN [M:0;0f983e3e5be1:40457 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-08T00:46:14,481 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636387061_22 at /127.0.0.1:39100 [Receiving block BP-356801691-172.17.0.2-1733618765086:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:41981:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39100 dst: /127.0.0.1:41981 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T00:46:14,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-08T00:46:14,486 WARN [M:0;0f983e3e5be1:40457 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-08T00:46:14,486 INFO [M:0;0f983e3e5be1:40457 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2b8dc806459f41a3845efe0b1b4c482f 2024-12-08T00:46:14,495 DEBUG [M:0;0f983e3e5be1:40457 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3608a8b2b74344e3877da3f7d7fc5739 as hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3608a8b2b74344e3877da3f7d7fc5739 2024-12-08T00:46:14,502 INFO [M:0;0f983e3e5be1:40457 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3608a8b2b74344e3877da3f7d7fc5739, entries=8, sequenceid=72, filesize=5.5 K 2024-12-08T00:46:14,504 DEBUG [M:0;0f983e3e5be1:40457 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/928980fc27874138a5b5940bcbd0cfe1 as hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/928980fc27874138a5b5940bcbd0cfe1 2024-12-08T00:46:14,512 INFO [M:0;0f983e3e5be1:40457 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/928980fc27874138a5b5940bcbd0cfe1, entries=8, sequenceid=72, filesize=6.3 K 2024-12-08T00:46:14,513 DEBUG [M:0;0f983e3e5be1:40457 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2b8dc806459f41a3845efe0b1b4c482f as hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2b8dc806459f41a3845efe0b1b4c482f 2024-12-08T00:46:14,521 INFO [M:0;0f983e3e5be1:40457 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2b8dc806459f41a3845efe0b1b4c482f, entries=3, sequenceid=72, filesize=5.2 K 2024-12-08T00:46:14,522 INFO [M:0;0f983e3e5be1:40457 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=72, compaction requested=false 2024-12-08T00:46:14,523 INFO [M:0;0f983e3e5be1:40457 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T00:46:14,523 DEBUG [M:0;0f983e3e5be1:40457 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733618774392Disabling compacts and flushes for region at 1733618774392Disabling writes for close at 1733618774392Obtaining lock to block concurrent updates at 1733618774393 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733618774393Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733618774393Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733618774394 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733618774394Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733618774410 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733618774410Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733618774428 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733618774445 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733618774445Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733618774462 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733618774477 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733618774477Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32bcb75d: reopening flushed file at 1733618774493 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@322f7493: reopening flushed file at 1733618774502 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@aa4b218: reopening flushed file at 1733618774512 (+10 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=72, compaction requested=false at 1733618774522 (+10 ms)Writing region close event to WAL at 1733618774523 (+1 ms)Closed at 1733618774523 2024-12-08T00:46:14,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40453 is added to blk_1073741825_1011 (size=32686) 2024-12-08T00:46:14,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41981 is added to blk_1073741825_1011 (size=32686) 2024-12-08T00:46:14,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34549 is added to blk_1073741825_1011 (size=32686) 2024-12-08T00:46:14,527 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T00:46:14,528 INFO [M:0;0f983e3e5be1:40457 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-08T00:46:14,528 INFO [M:0;0f983e3e5be1:40457 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40457 2024-12-08T00:46:14,528 INFO [M:0;0f983e3e5be1:40457 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:46:14,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:14,655 INFO [M:0;0f983e3e5be1:40457 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:46:14,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40457-0x10002f0d7d50000, quorum=127.0.0.1:55931, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:14,699 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:14,702 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:46:14,702 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:46:14,702 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:46:14,702 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/hadoop.log.dir/,STOPPED} 2024-12-08T00:46:14,707 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:46:14,707 WARN [BP-356801691-172.17.0.2-1733618765086 heartbeating to localhost/127.0.0.1:46415 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:46:14,708 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:46:14,708 WARN [BP-356801691-172.17.0.2-1733618765086 heartbeating to localhost/127.0.0.1:46415 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-356801691-172.17.0.2-1733618765086 (Datanode Uuid d01efd29-d8b0-4fa5-8dc1-8b567d2cbc62) service to localhost/127.0.0.1:46415 2024-12-08T00:46:14,710 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/data/data5/current/BP-356801691-172.17.0.2-1733618765086 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:46:14,711 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/data/data6/current/BP-356801691-172.17.0.2-1733618765086 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:46:14,711 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:46:14,713 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:14,713 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:46:14,714 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:46:14,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:46:14,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/hadoop.log.dir/,STOPPED} 2024-12-08T00:46:14,715 WARN [BP-356801691-172.17.0.2-1733618765086 heartbeating to localhost/127.0.0.1:46415 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:46:14,715 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:46:14,715 WARN [BP-356801691-172.17.0.2-1733618765086 heartbeating to localhost/127.0.0.1:46415 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-356801691-172.17.0.2-1733618765086 (Datanode Uuid f32052f7-e40d-41db-922e-ecaf018ca26e) service to localhost/127.0.0.1:46415 2024-12-08T00:46:14,715 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:46:14,716 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/data/data3/current/BP-356801691-172.17.0.2-1733618765086 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:46:14,716 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/data/data4/current/BP-356801691-172.17.0.2-1733618765086 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:46:14,716 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:46:14,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:14,722 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:46:14,722 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:46:14,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:46:14,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/hadoop.log.dir/,STOPPED} 2024-12-08T00:46:14,723 WARN [BP-356801691-172.17.0.2-1733618765086 heartbeating to localhost/127.0.0.1:46415 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:46:14,723 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:46:14,723 WARN [BP-356801691-172.17.0.2-1733618765086 heartbeating to localhost/127.0.0.1:46415 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-356801691-172.17.0.2-1733618765086 (Datanode Uuid e64a82fd-317a-45dd-8076-62a80a46ad24) service to localhost/127.0.0.1:46415 2024-12-08T00:46:14,723 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:46:14,724 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/data/data1/current/BP-356801691-172.17.0.2-1733618765086 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:46:14,724 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/cluster_fe650a32-a427-b5dd-d78b-8d75fe045f4d/data/data2/current/BP-356801691-172.17.0.2-1733618765086 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:46:14,724 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:46:14,729 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:46:14,730 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:46:14,730 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:46:14,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:46:14,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/hadoop.log.dir/,STOPPED} 2024-12-08T00:46:14,737 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T00:46:14,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T00:46:14,769 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=90 (was 159), OpenFileDescriptor=445 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=179 (was 183), ProcessCount=11 (was 11), AvailableMemoryMB=17830 (was 18113) 2024-12-08T00:46:14,774 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=90, OpenFileDescriptor=445, MaxFileDescriptor=1048576, SystemLoadAverage=179, ProcessCount=11, AvailableMemoryMB=17829 2024-12-08T00:46:14,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T00:46:14,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/hadoop.log.dir so I do NOT create it in target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85 2024-12-08T00:46:14,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f276f572-de8e-772d-76ff-9dabc5287ae2/hadoop.tmp.dir so I do NOT create it in target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85 2024-12-08T00:46:14,775 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb, deleteOnExit=true 2024-12-08T00:46:14,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T00:46:14,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/test.cache.data in system properties and HBase conf 2024-12-08T00:46:14,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T00:46:14,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/hadoop.log.dir in system properties and HBase conf 2024-12-08T00:46:14,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T00:46:14,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T00:46:14,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T00:46:14,775 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T00:46:14,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:46:14,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T00:46:14,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T00:46:14,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:46:14,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T00:46:14,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T00:46:14,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T00:46:14,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:46:14,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T00:46:14,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/nfs.dump.dir in system properties and HBase conf 2024-12-08T00:46:14,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/java.io.tmpdir in system properties and HBase conf 2024-12-08T00:46:14,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T00:46:14,777 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T00:46:14,777 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T00:46:15,003 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:15,007 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:46:15,011 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:46:15,011 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:46:15,011 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:46:15,012 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:15,012 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18f854cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:46:15,013 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16eaa68d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:46:15,101 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6ffa125c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/java.io.tmpdir/jetty-localhost-38879-hadoop-hdfs-3_4_1-tests_jar-_-any-7522928460519318536/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:46:15,102 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3aa18531{HTTP/1.1, (http/1.1)}{localhost:38879} 2024-12-08T00:46:15,102 INFO [Time-limited test {}] server.Server(415): Started @11630ms 2024-12-08T00:46:15,301 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:15,304 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:46:15,305 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:46:15,305 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:46:15,305 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:46:15,306 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@137179d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:46:15,306 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61d23bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:46:15,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@700f39d7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/java.io.tmpdir/jetty-localhost-40271-hadoop-hdfs-3_4_1-tests_jar-_-any-10745985260654572296/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:15,396 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@4e9ae4fc{HTTP/1.1, (http/1.1)}{localhost:40271} 2024-12-08T00:46:15,396 INFO [Time-limited test {}] server.Server(415): Started @11924ms 2024-12-08T00:46:15,397 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:46:15,427 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:15,430 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:46:15,430 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:46:15,430 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:46:15,431 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T00:46:15,431 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c597470{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:46:15,431 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e5afbc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:46:15,519 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6f8d2ee2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/java.io.tmpdir/jetty-localhost-45929-hadoop-hdfs-3_4_1-tests_jar-_-any-12590350353505891170/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:15,519 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6beabb01{HTTP/1.1, (http/1.1)}{localhost:45929} 2024-12-08T00:46:15,519 INFO [Time-limited test {}] server.Server(415): Started @12047ms 2024-12-08T00:46:15,521 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:46:15,546 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T00:46:15,549 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T00:46:15,549 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T00:46:15,549 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T00:46:15,549 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T00:46:15,550 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73f6422f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/hadoop.log.dir/,AVAILABLE} 2024-12-08T00:46:15,550 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c77de1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T00:46:15,639 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6e89cb0b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/java.io.tmpdir/jetty-localhost-42221-hadoop-hdfs-3_4_1-tests_jar-_-any-14090202617900405754/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:15,639 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b3c8c82{HTTP/1.1, (http/1.1)}{localhost:42221} 2024-12-08T00:46:15,639 INFO [Time-limited test {}] server.Server(415): Started @12167ms 2024-12-08T00:46:15,640 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T00:46:16,377 WARN [Thread-570 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/data/data1/current/BP-999966035-172.17.0.2-1733618774799/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:16,377 WARN [Thread-571 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/data/data2/current/BP-999966035-172.17.0.2-1733618774799/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:16,393 WARN [Thread-510 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:46:16,396 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1148e884dff098c with lease ID 0xda0ad2f176e79f11: Processing first storage report for DS-9cd6b0fd-dde0-43d6-8157-b8a46b683b85 from datanode DatanodeRegistration(127.0.0.1:44495, datanodeUuid=ad7a1ce7-eb1a-4d51-a229-8417d2ca59c6, infoPort=34185, infoSecurePort=0, ipcPort=40831, storageInfo=lv=-57;cid=testClusterID;nsid=514343182;c=1733618774799) 2024-12-08T00:46:16,396 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1148e884dff098c with lease ID 0xda0ad2f176e79f11: from storage DS-9cd6b0fd-dde0-43d6-8157-b8a46b683b85 node DatanodeRegistration(127.0.0.1:44495, datanodeUuid=ad7a1ce7-eb1a-4d51-a229-8417d2ca59c6, infoPort=34185, infoSecurePort=0, ipcPort=40831, storageInfo=lv=-57;cid=testClusterID;nsid=514343182;c=1733618774799), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T00:46:16,396 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1148e884dff098c with lease ID 0xda0ad2f176e79f11: Processing first storage report for DS-b8c2e651-1172-4e47-b4a4-6aa453ea77cd from datanode DatanodeRegistration(127.0.0.1:44495, datanodeUuid=ad7a1ce7-eb1a-4d51-a229-8417d2ca59c6, infoPort=34185, infoSecurePort=0, ipcPort=40831, storageInfo=lv=-57;cid=testClusterID;nsid=514343182;c=1733618774799) 2024-12-08T00:46:16,396 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1148e884dff098c with lease ID 0xda0ad2f176e79f11: from storage DS-b8c2e651-1172-4e47-b4a4-6aa453ea77cd node DatanodeRegistration(127.0.0.1:44495, datanodeUuid=ad7a1ce7-eb1a-4d51-a229-8417d2ca59c6, infoPort=34185, infoSecurePort=0, ipcPort=40831, storageInfo=lv=-57;cid=testClusterID;nsid=514343182;c=1733618774799), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:46:16,570 WARN [Thread-581 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/data/data3/current/BP-999966035-172.17.0.2-1733618774799/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:16,570 WARN [Thread-582 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/data/data4/current/BP-999966035-172.17.0.2-1733618774799/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:16,589 WARN [Thread-533 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:46:16,591 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa14ab74658f0a9c4 with lease ID 0xda0ad2f176e79f12: Processing first storage report for DS-1ae6c774-5e0b-40c0-8835-c4ad1c696897 from datanode DatanodeRegistration(127.0.0.1:38147, datanodeUuid=74131f4e-aee3-452e-aae2-b0f177768a54, infoPort=44893, infoSecurePort=0, ipcPort=40819, storageInfo=lv=-57;cid=testClusterID;nsid=514343182;c=1733618774799) 2024-12-08T00:46:16,592 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa14ab74658f0a9c4 with lease ID 0xda0ad2f176e79f12: from storage DS-1ae6c774-5e0b-40c0-8835-c4ad1c696897 node DatanodeRegistration(127.0.0.1:38147, datanodeUuid=74131f4e-aee3-452e-aae2-b0f177768a54, infoPort=44893, infoSecurePort=0, ipcPort=40819, storageInfo=lv=-57;cid=testClusterID;nsid=514343182;c=1733618774799), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:46:16,592 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa14ab74658f0a9c4 with lease ID 0xda0ad2f176e79f12: Processing first storage report for DS-42b4543d-cf48-4e8c-a5c9-addc03c75fb0 from datanode DatanodeRegistration(127.0.0.1:38147, datanodeUuid=74131f4e-aee3-452e-aae2-b0f177768a54, infoPort=44893, infoSecurePort=0, ipcPort=40819, storageInfo=lv=-57;cid=testClusterID;nsid=514343182;c=1733618774799) 2024-12-08T00:46:16,592 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa14ab74658f0a9c4 with lease ID 0xda0ad2f176e79f12: from storage DS-42b4543d-cf48-4e8c-a5c9-addc03c75fb0 node DatanodeRegistration(127.0.0.1:38147, datanodeUuid=74131f4e-aee3-452e-aae2-b0f177768a54, infoPort=44893, infoSecurePort=0, ipcPort=40819, storageInfo=lv=-57;cid=testClusterID;nsid=514343182;c=1733618774799), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:46:16,646 WARN [Thread-593 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/data/data6/current/BP-999966035-172.17.0.2-1733618774799/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:16,646 WARN [Thread-592 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/data/data5/current/BP-999966035-172.17.0.2-1733618774799/current, will proceed with Du for space computation calculation, 2024-12-08T00:46:16,670 WARN [Thread-555 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T00:46:16,673 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6dacda99cce46998 with lease ID 0xda0ad2f176e79f13: Processing first storage report for DS-d1fa647a-c37a-45a0-9811-9dbd77ab2ed2 from datanode DatanodeRegistration(127.0.0.1:42107, datanodeUuid=9f4c8d06-da19-45ff-8182-f77e68c06b76, infoPort=43313, infoSecurePort=0, ipcPort=42379, storageInfo=lv=-57;cid=testClusterID;nsid=514343182;c=1733618774799) 2024-12-08T00:46:16,674 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6dacda99cce46998 with lease ID 0xda0ad2f176e79f13: from storage DS-d1fa647a-c37a-45a0-9811-9dbd77ab2ed2 node DatanodeRegistration(127.0.0.1:42107, datanodeUuid=9f4c8d06-da19-45ff-8182-f77e68c06b76, infoPort=43313, infoSecurePort=0, ipcPort=42379, storageInfo=lv=-57;cid=testClusterID;nsid=514343182;c=1733618774799), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:46:16,674 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6dacda99cce46998 with lease ID 0xda0ad2f176e79f13: Processing first storage report for DS-4405bf3e-2d71-46fb-92d7-71971af421fe from datanode DatanodeRegistration(127.0.0.1:42107, datanodeUuid=9f4c8d06-da19-45ff-8182-f77e68c06b76, infoPort=43313, infoSecurePort=0, ipcPort=42379, storageInfo=lv=-57;cid=testClusterID;nsid=514343182;c=1733618774799) 2024-12-08T00:46:16,674 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6dacda99cce46998 with lease ID 0xda0ad2f176e79f13: from storage DS-4405bf3e-2d71-46fb-92d7-71971af421fe node DatanodeRegistration(127.0.0.1:42107, datanodeUuid=9f4c8d06-da19-45ff-8182-f77e68c06b76, infoPort=43313, infoSecurePort=0, ipcPort=42379, storageInfo=lv=-57;cid=testClusterID;nsid=514343182;c=1733618774799), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T00:46:16,676 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85 2024-12-08T00:46:16,679 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/zookeeper_0, clientPort=58726, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T00:46:16,679 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58726 2024-12-08T00:46:16,680 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:16,681 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:16,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:46:16,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:46:16,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741825_1001 (size=7) 2024-12-08T00:46:16,696 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1 with version=8 2024-12-08T00:46:16,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:46415/user/jenkins/test-data/f88bb015-a4dc-a798-6f89-5983724f011a/hbase-staging 2024-12-08T00:46:16,699 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:46:16,700 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:16,700 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:16,700 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:46:16,700 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:16,700 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:46:16,700 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T00:46:16,700 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:46:16,701 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42509 2024-12-08T00:46:16,702 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42509 connecting to ZooKeeper ensemble=127.0.0.1:58726 2024-12-08T00:46:16,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:425090x0, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:46:16,742 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42509-0x10002f0f9030000 connected 2024-12-08T00:46:16,821 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:16,824 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:16,827 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:16,828 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1, hbase.cluster.distributed=false 2024-12-08T00:46:16,830 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:46:16,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42509 2024-12-08T00:46:16,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42509 2024-12-08T00:46:16,831 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42509 2024-12-08T00:46:16,831 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42509 2024-12-08T00:46:16,835 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42509 2024-12-08T00:46:16,848 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:46:16,848 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:16,849 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:16,849 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:46:16,849 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:16,849 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:46:16,849 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:46:16,849 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:46:16,849 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41367 2024-12-08T00:46:16,850 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41367 connecting to ZooKeeper ensemble=127.0.0.1:58726 2024-12-08T00:46:16,851 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:16,853 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:16,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413670x0, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:46:16,862 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41367-0x10002f0f9030001 connected 2024-12-08T00:46:16,862 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:16,863 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:46:16,863 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:46:16,864 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:46:16,865 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:46:16,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41367 2024-12-08T00:46:16,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41367 2024-12-08T00:46:16,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41367 2024-12-08T00:46:16,867 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41367 2024-12-08T00:46:16,867 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41367 2024-12-08T00:46:16,883 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:46:16,883 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:16,883 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:16,883 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:46:16,883 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:16,883 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:46:16,883 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:46:16,883 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:46:16,884 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39157 2024-12-08T00:46:16,885 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39157 connecting to ZooKeeper ensemble=127.0.0.1:58726 2024-12-08T00:46:16,886 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:16,887 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:16,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391570x0, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:46:16,896 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:16,896 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39157-0x10002f0f9030002 connected 2024-12-08T00:46:16,896 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:46:16,897 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:46:16,897 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:46:16,898 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:46:16,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39157 2024-12-08T00:46:16,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39157 2024-12-08T00:46:16,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39157 2024-12-08T00:46:16,906 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39157 2024-12-08T00:46:16,906 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39157 2024-12-08T00:46:16,920 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0f983e3e5be1:0 server-side Connection retries=45 2024-12-08T00:46:16,921 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:16,921 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:16,921 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T00:46:16,921 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T00:46:16,921 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T00:46:16,921 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T00:46:16,921 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T00:46:16,922 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42617 2024-12-08T00:46:16,923 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42617 connecting to ZooKeeper ensemble=127.0.0.1:58726 2024-12-08T00:46:16,924 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:16,926 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:16,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:426170x0, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T00:46:16,937 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42617-0x10002f0f9030003 connected 2024-12-08T00:46:16,937 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:16,938 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T00:46:16,938 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T00:46:16,939 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T00:46:16,940 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T00:46:16,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42617 2024-12-08T00:46:16,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42617 2024-12-08T00:46:16,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42617 2024-12-08T00:46:16,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42617 2024-12-08T00:46:16,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42617 2024-12-08T00:46:16,954 DEBUG [M:0;0f983e3e5be1:42509 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0f983e3e5be1:42509 2024-12-08T00:46:16,954 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0f983e3e5be1,42509,1733618776699 2024-12-08T00:46:16,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:16,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:16,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:16,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:16,963 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0f983e3e5be1,42509,1733618776699 2024-12-08T00:46:16,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:46:16,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:46:16,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T00:46:16,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:16,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:16,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:16,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:16,971 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T00:46:16,972 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0f983e3e5be1,42509,1733618776699 from backup master directory 2024-12-08T00:46:16,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0f983e3e5be1,42509,1733618776699 2024-12-08T00:46:16,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:16,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:16,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:16,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T00:46:16,979 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-08T00:46:16,979 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0f983e3e5be1,42509,1733618776699 2024-12-08T00:46:16,985 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/hbase.id] with ID: cb67eef0-d80a-4034-9c12-4df4f1430ac1 2024-12-08T00:46:16,985 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/.tmp/hbase.id 2024-12-08T00:46:16,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:46:16,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:46:16,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741826_1002 (size=42) 2024-12-08T00:46:16,994 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/.tmp/hbase.id]:[hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/hbase.id] 2024-12-08T00:46:17,011 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T00:46:17,011 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T00:46:17,012 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-08T00:46:17,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:46:17,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:46:17,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741827_1003 (size=196) 2024-12-08T00:46:17,032 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:46:17,033 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T00:46:17,033 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:46:17,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is 
added to blk_1073741828_1004 (size=1189) 2024-12-08T00:46:17,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:46:17,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741828_1004 (size=1189) 2024-12-08T00:46:17,047 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store 2024-12-08T00:46:17,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:46:17,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:46:17,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741829_1005 (size=34) 2024-12-08T00:46:17,057 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:17,057 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:46:17,057 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:46:17,057 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
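The descriptor printed above for the local master:store region (an in-memory 'info' family with ROW_INDEX_V1 encoding, ROWCOL bloom filters and 8 KB blocks, plus plain 'proc'/'rs'/'state' families with 64 KB blocks) can be reproduced with the public builder API. The sketch below only mirrors those per-family settings; it is not how MasterRegion builds its internal descriptor, and the table name 'demo:store' is hypothetical.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Builds a descriptor with the same kind of settings the log prints for master:store.
public class StoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo:store"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8192)                                   // 8 KB blocks, as logged
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBlocksize(65536)                                  // 64 KB blocks, as logged
        .setBloomFilterType(BloomType.ROW)
        .build())
      .build();
  }
}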
2024-12-08T00:46:17,057 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:46:17,057 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:46:17,057 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:46:17,058 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733618777057Disabling compacts and flushes for region at 1733618777057Disabling writes for close at 1733618777057Writing region close event to WAL at 1733618777057Closed at 1733618777057 2024-12-08T00:46:17,058 WARN [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/.initializing 2024-12-08T00:46:17,058 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/WALs/0f983e3e5be1,42509,1733618776699 2024-12-08T00:46:17,062 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C42509%2C1733618776699, suffix=, logDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/WALs/0f983e3e5be1,42509,1733618776699, archiveDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/oldWALs, maxLogs=10 2024-12-08T00:46:17,063 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C42509%2C1733618776699.1733618777062 2024-12-08T00:46:17,073 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/WALs/0f983e3e5be1,42509,1733618776699/0f983e3e5be1%2C42509%2C1733618776699.1733618777062 2024-12-08T00:46:17,077 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43313:43313),(127.0.0.1/127.0.0.1:44893:44893),(127.0.0.1/127.0.0.1:34185:34185)] 2024-12-08T00:46:17,078 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:46:17,078 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:17,078 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:17,078 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:17,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:17,082 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T00:46:17,082 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:17,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:17,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:17,085 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T00:46:17,085 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:17,086 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:46:17,086 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:17,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T00:46:17,089 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:17,090 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:46:17,090 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:17,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T00:46:17,092 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:17,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:46:17,092 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:17,093 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:17,094 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:17,095 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:17,095 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:17,096 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T00:46:17,097 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T00:46:17,099 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:46:17,100 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67711927, jitterRate=0.008986338973045349}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:46:17,101 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733618777078Initializing all the Stores at 1733618777080 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618777080Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618777080Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618777080Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618777080Cleaning up temporary data from old regions at 1733618777095 (+15 ms)Region opened successfully at 1733618777101 (+6 ms) 2024-12-08T00:46:17,102 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T00:46:17,107 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@794e38a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:46:17,108 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T00:46:17,108 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T00:46:17,108 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T00:46:17,108 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T00:46:17,109 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T00:46:17,110 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T00:46:17,110 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T00:46:17,112 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-08T00:46:17,113 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T00:46:17,129 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T00:46:17,129 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T00:46:17,130 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T00:46:17,137 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T00:46:17,137 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T00:46:17,139 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T00:46:17,145 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T00:46:17,146 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T00:46:17,153 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T00:46:17,157 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T00:46:17,167 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T00:46:17,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:17,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:17,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:17,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-08T00:46:17,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,179 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0f983e3e5be1,42509,1733618776699, sessionid=0x10002f0f9030000, setting cluster-up flag (Was=false) 2024-12-08T00:46:17,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,220 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T00:46:17,223 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,42509,1733618776699 2024-12-08T00:46:17,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,270 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T00:46:17,271 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0f983e3e5be1,42509,1733618776699 2024-12-08T00:46:17,273 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T00:46:17,275 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T00:46:17,275 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T00:46:17,275 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
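Earlier in this stretch the master clears all znodes under /hbase/flush-table-proc and /hbase/online-snapshot before starting the procedure coordinators, and treats "node does not exist" as a non-error, exactly like the "already deleted, retry=false" lines. A rough equivalent with the plain ZooKeeper client is shown below; the real ZKProcedureUtil helper differs in detail (it clears children but keeps the coordination parents), so treat the recursive delete as an assumption-laden illustration, not the library's behavior.

import java.util.List;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

// Sketch: remove a znode subtree, tolerating nodes that are already gone.
public class ClearProcedureZnodesSketch {

  // Delete all children of `path`, then `path` itself (version -1 matches any version).
  static void deleteRecursively(ZooKeeper zk, String path)
      throws KeeperException, InterruptedException {
    List<String> children = zk.getChildren(path, false);
    for (String child : children) {
      deleteRecursively(zk, path + "/" + child);
    }
    zk.delete(path, -1);
  }

  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58726", 30_000, event -> { });
    for (String node : new String[] {
        "/hbase/flush-table-proc/acquired",
        "/hbase/flush-table-proc/reached",
        "/hbase/flush-table-proc/abort" }) {
      try {
        deleteRecursively(zk, node);
      } catch (KeeperException.NoNodeException e) {
        // already absent: the same non-error case the log reports as "already deleted, retry=false"
      }
    }
    zk.close();
  }
}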
2024-12-08T00:46:17,276 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0f983e3e5be1,42509,1733618776699 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T00:46:17,277 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:46:17,277 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:46:17,277 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:46:17,277 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=5, maxPoolSize=5 2024-12-08T00:46:17,277 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0f983e3e5be1:0, corePoolSize=10, maxPoolSize=10 2024-12-08T00:46:17,278 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,278 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:46:17,278 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,278 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733618807278 2024-12-08T00:46:17,279 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T00:46:17,279 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T00:46:17,279 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T00:46:17,279 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T00:46:17,279 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T00:46:17,279 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T00:46:17,279 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,280 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T00:46:17,280 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T00:46:17,280 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T00:46:17,280 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:46:17,280 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T00:46:17,280 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T00:46:17,280 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T00:46:17,280 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618777280,5,FailOnTimeoutGroup] 2024-12-08T00:46:17,281 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618777280,5,FailOnTimeoutGroup] 2024-12-08T00:46:17,281 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,281 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T00:46:17,281 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,281 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
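The ChoreService lines above register periodic cleaners (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner and SnapshotCleaner on longer periods). The underlying pattern is plain fixed-rate scheduling; the sketch below uses the JDK scheduler rather than HBase's internal ChoreService/ScheduledChore classes, so everything in it apart from the 600000 ms period is an assumption made for illustration.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustration of the periodic-chore pattern behind the ScheduledChore entries above.
public class CleanerChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
    Runnable chore = () -> {
      // In HBase the chore walks the oldWALs / archive directories and deletes only the
      // files every plugged-in cleaner (TTL, replication, snapshot, ...) agrees are safe.
      System.out.println("cleaner chore tick");
    };
    // period=600000 ms matches the LogsCleaner / HFileCleaner schedules in the log
    pool.scheduleAtFixedRate(chore, 0, 600_000, TimeUnit.MILLISECONDS);
  }
}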
2024-12-08T00:46:17,282 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:17,282 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T00:46:17,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:46:17,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:46:17,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741831_1007 (size=1321) 2024-12-08T00:46:17,295 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T00:46:17,295 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1 2024-12-08T00:46:17,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:46:17,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:46:17,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741832_1008 (size=32) 2024-12-08T00:46:17,305 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:17,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:46:17,309 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:46:17,309 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:17,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:17,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:46:17,311 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:46:17,311 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:17,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:17,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:46:17,313 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:46:17,313 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:17,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:17,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:46:17,315 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:46:17,315 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:17,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:17,316 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:46:17,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740 2024-12-08T00:46:17,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740 2024-12-08T00:46:17,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:46:17,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:46:17,319 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T00:46:17,321 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T00:46:17,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:46:17,324 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:46:17,325 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65969456, jitterRate=-0.01697850227355957}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:46:17,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733618777305Initializing all the Stores at 1733618777307 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618777307Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => 
'8192 B (8KB)'} at 1733618777307Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618777307Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618777307Cleaning up temporary data from old regions at 1733618777319 (+12 ms)Region opened successfully at 1733618777327 (+8 ms) 2024-12-08T00:46:17,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:46:17,328 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:46:17,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:46:17,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:46:17,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:46:17,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:46:17,329 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:46:17,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733618777328Disabling compacts and flushes for region at 1733618777328Disabling writes for close at 1733618777328Writing region close event to WAL at 1733618777329 (+1 ms)Closed at 1733618777329 2024-12-08T00:46:17,331 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:46:17,331 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T00:46:17,331 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T00:46:17,333 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:46:17,336 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T00:46:17,344 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(746): ClusterId : cb67eef0-d80a-4034-9c12-4df4f1430ac1 2024-12-08T00:46:17,344 INFO [RS:2;0f983e3e5be1:42617 {}] 
regionserver.HRegionServer(746): ClusterId : cb67eef0-d80a-4034-9c12-4df4f1430ac1 2024-12-08T00:46:17,344 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:46:17,344 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:46:17,344 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.HRegionServer(746): ClusterId : cb67eef0-d80a-4034-9c12-4df4f1430ac1 2024-12-08T00:46:17,344 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T00:46:17,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:46:17,369 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T00:46:17,376 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:46:17,376 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:46:17,376 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:46:17,376 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:46:17,376 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T00:46:17,376 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T00:46:17,395 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:46:17,396 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:46:17,396 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T00:46:17,396 DEBUG [RS:0;0f983e3e5be1:41367 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47c37a6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:46:17,396 DEBUG [RS:2;0f983e3e5be1:42617 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@439a1743, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:46:17,396 DEBUG [RS:1;0f983e3e5be1:39157 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2011674c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0f983e3e5be1/172.17.0.2:0 2024-12-08T00:46:17,406 DEBUG [RS:0;0f983e3e5be1:41367 {}] regionserver.ShutdownHook(81): Installed shutdown hook 
thread: Shutdownhook:RS:0;0f983e3e5be1:41367 2024-12-08T00:46:17,406 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:46:17,406 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:46:17,406 DEBUG [RS:0;0f983e3e5be1:41367 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T00:46:17,406 DEBUG [RS:2;0f983e3e5be1:42617 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;0f983e3e5be1:42617 2024-12-08T00:46:17,407 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:46:17,407 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:46:17,407 DEBUG [RS:2;0f983e3e5be1:42617 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T00:46:17,407 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,42509,1733618776699 with port=41367, startcode=1733618776848 2024-12-08T00:46:17,407 DEBUG [RS:0;0f983e3e5be1:41367 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:46:17,407 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,42509,1733618776699 with port=42617, startcode=1733618776920 2024-12-08T00:46:17,408 DEBUG [RS:2;0f983e3e5be1:42617 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:46:17,409 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55445, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:46:17,409 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49761, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:46:17,410 DEBUG [RS:1;0f983e3e5be1:39157 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;0f983e3e5be1:39157 2024-12-08T00:46:17,410 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42509 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,41367,1733618776848 2024-12-08T00:46:17,410 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T00:46:17,410 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T00:46:17,410 DEBUG [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(832): About to register with Master. 
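
The "Installed shutdown hook thread: Shutdownhook:RS:..." entries above record each region server registering a JVM shutdown hook so that an abrupt JVM exit still runs cleanup. A minimal, generic sketch of that mechanism using only the standard JDK (this is not HBase's own regionserver.ShutdownHook class; the thread name and cleanup action are illustrative):

    import java.util.concurrent.TimeUnit;

    public class ShutdownHookSketch {
        public static void main(String[] args) throws InterruptedException {
            // Register a hook that the JVM runs on normal exit or SIGTERM.
            Thread hook = new Thread(() -> {
                // In a real server this is where WALs, sockets, etc. would be closed.
                System.out.println("shutdown hook running: releasing resources");
            }, "Shutdownhook:example");
            Runtime.getRuntime().addShutdownHook(hook);

            System.out.println("server running; exiting in 1s");
            TimeUnit.SECONDS.sleep(1);
            // The hook fires automatically when main returns and the JVM exits.
        }
    }
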
2024-12-08T00:46:17,410 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42509 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,41367,1733618776848 2024-12-08T00:46:17,411 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(2659): reportForDuty to master=0f983e3e5be1,42509,1733618776699 with port=39157, startcode=1733618776882 2024-12-08T00:46:17,411 DEBUG [RS:1;0f983e3e5be1:39157 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T00:46:17,412 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42509 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,42617,1733618776920 2024-12-08T00:46:17,412 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42509 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,42617,1733618776920 2024-12-08T00:46:17,412 DEBUG [RS:0;0f983e3e5be1:41367 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1 2024-12-08T00:46:17,412 DEBUG [RS:0;0f983e3e5be1:41367 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40987 2024-12-08T00:46:17,412 DEBUG [RS:0;0f983e3e5be1:41367 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:46:17,413 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45245, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T00:46:17,414 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42509 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0f983e3e5be1,39157,1733618776882 2024-12-08T00:46:17,414 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42509 {}] master.ServerManager(517): Registering regionserver=0f983e3e5be1,39157,1733618776882 2024-12-08T00:46:17,414 DEBUG [RS:2;0f983e3e5be1:42617 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1 2024-12-08T00:46:17,414 DEBUG [RS:2;0f983e3e5be1:42617 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40987 2024-12-08T00:46:17,414 DEBUG [RS:2;0f983e3e5be1:42617 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:46:17,416 DEBUG [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1 2024-12-08T00:46:17,416 DEBUG [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40987 2024-12-08T00:46:17,416 DEBUG [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T00:46:17,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:46:17,449 DEBUG [RS:0;0f983e3e5be1:41367 {}] zookeeper.ZKUtil(111): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,41367,1733618776848 2024-12-08T00:46:17,449 WARN 
[RS:0;0f983e3e5be1:41367 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:46:17,449 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,39157,1733618776882] 2024-12-08T00:46:17,450 INFO [RS:0;0f983e3e5be1:41367 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:46:17,450 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,42617,1733618776920] 2024-12-08T00:46:17,450 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0f983e3e5be1,41367,1733618776848] 2024-12-08T00:46:17,450 DEBUG [RS:2;0f983e3e5be1:42617 {}] zookeeper.ZKUtil(111): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,42617,1733618776920 2024-12-08T00:46:17,450 DEBUG [RS:0;0f983e3e5be1:41367 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/WALs/0f983e3e5be1,41367,1733618776848 2024-12-08T00:46:17,450 DEBUG [RS:1;0f983e3e5be1:39157 {}] zookeeper.ZKUtil(111): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0f983e3e5be1,39157,1733618776882 2024-12-08T00:46:17,450 WARN [RS:2;0f983e3e5be1:42617 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T00:46:17,450 WARN [RS:1;0f983e3e5be1:39157 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
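
The ZKUtil and RegionServerTracker entries above show each region server announcing itself as an ephemeral znode under /hbase/rs while the master watches that path for children changes; the ephemeral node vanishing with the session is what makes a crashed region server detectable (hence the HBASE_ZNODE_FILE warning about MTTR). A minimal sketch of the same pattern against the raw ZooKeeper client API, with the connection string and znode names copied from the log; the empty payload and lack of retry/error handling are simplifying assumptions:

    import java.util.List;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralRegistrationSketch {
        public static void main(String[] args) throws Exception {
            Watcher watcher = (WatchedEvent event) ->
                System.out.println("event: " + event.getType() + " on " + event.getPath());

            // Quorum and baseZNode taken from the log: 127.0.0.1:58726, /hbase
            ZooKeeper zk = new ZooKeeper("127.0.0.1:58726", 30_000, watcher);

            // Ephemeral node disappears automatically if this session dies.
            zk.create("/hbase/rs/example-host,41367,1733618776848", new byte[0],
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

            // A tracker (like RegionServerTracker) watches the parent for changes.
            List<String> servers = zk.getChildren("/hbase/rs", true);
            System.out.println("registered servers: " + servers);

            zk.close();
        }
    }
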
2024-12-08T00:46:17,450 INFO [RS:2;0f983e3e5be1:42617 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:46:17,450 INFO [RS:1;0f983e3e5be1:39157 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:46:17,450 DEBUG [RS:2;0f983e3e5be1:42617 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/WALs/0f983e3e5be1,42617,1733618776920 2024-12-08T00:46:17,450 DEBUG [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/WALs/0f983e3e5be1,39157,1733618776882 2024-12-08T00:46:17,457 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:46:17,457 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:46:17,457 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T00:46:17,461 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:46:17,461 INFO [RS:2;0f983e3e5be1:42617 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:46:17,461 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,462 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:46:17,463 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:46:17,463 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
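
The MemStoreFlusher entry above ("globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false") is consistent with the usual sizing rule: the global limit is a fraction of the heap, and the low-water mark is a fraction of that limit. A back-of-envelope check, assuming the stock fractions (0.4 for hbase.regionserver.global.memstore.size, 0.95 for its lower limit) and therefore a test heap of roughly 2.2 GB; the heap size itself is an inference, not something the log states:

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            double heapMb = 2200;        // assumed max heap (inferred from 880 / 0.4)
            double globalFraction = 0.4; // assumed default global memstore fraction
            double lowerLimit = 0.95;    // assumed default lower-limit fraction

            double globalLimitMb = heapMb * globalFraction; // ~880 MB, matches the log
            double lowMarkMb = globalLimitMb * lowerLimit;  // ~836 MB, matches the log

            System.out.printf("globalMemStoreLimit=%.0f M, lowMark=%.0f M%n",
                    globalLimitMb, lowMarkMb);
        }
    }
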
2024-12-08T00:46:17,463 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,463 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,463 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,463 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,464 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,464 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:46:17,464 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,464 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,464 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,464 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,464 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,464 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,464 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:17,465 DEBUG [RS:2;0f983e3e5be1:42617 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:17,465 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:46:17,465 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T00:46:17,467 INFO [RS:1;0f983e3e5be1:39157 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:46:17,467 INFO [RS:0;0f983e3e5be1:41367 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput 
configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T00:46:17,467 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,467 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,467 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,467 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,467 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,468 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,468 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,468 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42617,1733618776920-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:46:17,470 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:46:17,470 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T00:46:17,471 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:46:17,471 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T00:46:17,471 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,471 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
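
The ChoreService entries above schedule recurring maintenance tasks at fixed periods (CompactionChecker and MemstoreFlusherChore every 1 s, ExecutorStatusChore and CompactionThroughputTuner every 60 s, BrokenStoreFileCleaner every 6 h, and so on). ChoreService is HBase's own abstraction, but the behaviour being logged is plain fixed-rate scheduling; a generic JDK sketch of that pattern, with task names and periods taken from the log purely for illustration:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);

            // period=1000, unit=MILLISECONDS -- like CompactionChecker / MemstoreFlusherChore
            chores.scheduleAtFixedRate(
                    () -> System.out.println("compaction check"), 0, 1000, TimeUnit.MILLISECONDS);

            // period=60000, unit=MILLISECONDS -- like ExecutorStatusChore / CompactionThroughputTuner
            chores.scheduleAtFixedRate(
                    () -> System.out.println("executor status"), 0, 60_000, TimeUnit.MILLISECONDS);

            TimeUnit.SECONDS.sleep(3);   // let a few periods elapse, then stop
            chores.shutdownNow();
        }
    }
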
2024-12-08T00:46:17,471 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,471 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:46:17,472 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0f983e3e5be1:0, corePoolSize=2, maxPoolSize=2 2024-12-08T00:46:17,472 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, 
corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:17,472 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,472 DEBUG [RS:1;0f983e3e5be1:39157 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:17,472 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,473 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0f983e3e5be1:0, corePoolSize=1, maxPoolSize=1 2024-12-08T00:46:17,473 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:17,473 DEBUG [RS:0;0f983e3e5be1:41367 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0, corePoolSize=3, maxPoolSize=3 2024-12-08T00:46:17,475 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,475 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,475 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,475 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,475 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,475 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,39157,1733618776882-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:46:17,476 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,476 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
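
The executor.ExecutorService entries above start one bounded thread pool per event type (RS_OPEN_REGION, RS_CLOSE_META, RS_FLUSH_OPERATIONS, ...) with explicit corePoolSize/maxPoolSize values. The generic JDK building block behind that kind of configuration is ThreadPoolExecutor; a minimal sketch, with the pool sizes copied from the log and the queue choice and keep-alive time stated as assumptions:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class EventPoolSketch {
        public static void main(String[] args) {
            // corePoolSize=3, maxPoolSize=3 -- like RS_FLUSH_OPERATIONS / RS_SNAPSHOT_OPERATIONS
            ThreadPoolExecutor flushOps = new ThreadPoolExecutor(
                    3, 3, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());

            for (int i = 0; i < 5; i++) {
                final int task = i;
                flushOps.execute(() -> System.out.println("flush op " + task
                        + " on " + Thread.currentThread().getName()));
            }
            flushOps.shutdown();
        }
    }
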
2024-12-08T00:46:17,476 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,476 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,476 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,476 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41367,1733618776848-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T00:46:17,486 WARN [0f983e3e5be1:42509 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T00:46:17,486 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:46:17,487 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42617,1733618776920-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,487 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,487 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.Replication(171): 0f983e3e5be1,42617,1733618776920 started 2024-12-08T00:46:17,488 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:46:17,488 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,39157,1733618776882-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,488 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,488 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.Replication(171): 0f983e3e5be1,39157,1733618776882 started 2024-12-08T00:46:17,494 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T00:46:17,494 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,41367,1733618776848-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,494 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:17,494 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.Replication(171): 0f983e3e5be1,41367,1733618776848 started 2024-12-08T00:46:17,500 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T00:46:17,500 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,39157,1733618776882, RpcServer on 0f983e3e5be1/172.17.0.2:39157, sessionid=0x10002f0f9030002 2024-12-08T00:46:17,500 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:46:17,500 DEBUG [RS:1;0f983e3e5be1:39157 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,39157,1733618776882 2024-12-08T00:46:17,500 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,39157,1733618776882' 2024-12-08T00:46:17,500 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:46:17,501 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:46:17,502 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:46:17,502 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:46:17,502 DEBUG [RS:1;0f983e3e5be1:39157 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0f983e3e5be1,39157,1733618776882 2024-12-08T00:46:17,502 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,39157,1733618776882' 2024-12-08T00:46:17,502 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:46:17,502 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:46:17,503 DEBUG [RS:1;0f983e3e5be1:39157 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:46:17,503 INFO [RS:1;0f983e3e5be1:39157 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:46:17,503 INFO [RS:1;0f983e3e5be1:39157 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T00:46:17,505 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T00:46:17,505 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,42617,1733618776920, RpcServer on 0f983e3e5be1/172.17.0.2:42617, sessionid=0x10002f0f9030003 2024-12-08T00:46:17,505 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:46:17,506 DEBUG [RS:2;0f983e3e5be1:42617 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,42617,1733618776920 2024-12-08T00:46:17,506 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,42617,1733618776920' 2024-12-08T00:46:17,506 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:46:17,506 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:46:17,507 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:46:17,507 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:46:17,507 DEBUG [RS:2;0f983e3e5be1:42617 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0f983e3e5be1,42617,1733618776920 2024-12-08T00:46:17,507 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,42617,1733618776920' 2024-12-08T00:46:17,507 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:46:17,507 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:46:17,508 DEBUG [RS:2;0f983e3e5be1:42617 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:46:17,508 INFO [RS:2;0f983e3e5be1:42617 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:46:17,508 INFO [RS:2;0f983e3e5be1:42617 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T00:46:17,511 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T00:46:17,512 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.HRegionServer(1482): Serving as 0f983e3e5be1,41367,1733618776848, RpcServer on 0f983e3e5be1/172.17.0.2:41367, sessionid=0x10002f0f9030001 2024-12-08T00:46:17,512 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T00:46:17,512 DEBUG [RS:0;0f983e3e5be1:41367 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0f983e3e5be1,41367,1733618776848 2024-12-08T00:46:17,512 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,41367,1733618776848' 2024-12-08T00:46:17,512 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T00:46:17,512 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T00:46:17,513 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T00:46:17,513 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T00:46:17,513 DEBUG [RS:0;0f983e3e5be1:41367 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0f983e3e5be1,41367,1733618776848 2024-12-08T00:46:17,513 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0f983e3e5be1,41367,1733618776848' 2024-12-08T00:46:17,513 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T00:46:17,513 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T00:46:17,514 DEBUG [RS:0;0f983e3e5be1:41367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T00:46:17,514 INFO [RS:0;0f983e3e5be1:41367 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T00:46:17,514 INFO [RS:0;0f983e3e5be1:41367 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
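
At this point all three region servers are serving RPCs and registered in ZooKeeper, so an ordinary client could reach the cluster by pointing at the same quorum (127.0.0.1:58726) and base znode (/hbase). A hedged sketch with the HBase 2.x+ client API; the exact client artifacts on the classpath are an assumption, and against this particular test cluster one would normally use the testing utility's own connection rather than building one by hand:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // from the log
            conf.set("hbase.zookeeper.property.clientPort", "58726"); // from the log
            conf.set("zookeeper.znode.parent", "/hbase");             // baseZNode in the log

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Should list the three region servers registered above.
                System.out.println(admin.getClusterMetrics().getServersName());
            }
        }
    }
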
2024-12-08T00:46:17,609 INFO [RS:1;0f983e3e5be1:39157 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C39157%2C1733618776882, suffix=, logDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/WALs/0f983e3e5be1,39157,1733618776882, archiveDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/oldWALs, maxLogs=32 2024-12-08T00:46:17,613 INFO [RS:2;0f983e3e5be1:42617 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C42617%2C1733618776920, suffix=, logDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/WALs/0f983e3e5be1,42617,1733618776920, archiveDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/oldWALs, maxLogs=32 2024-12-08T00:46:17,614 INFO [RS:1;0f983e3e5be1:39157 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C39157%2C1733618776882.1733618777614 2024-12-08T00:46:17,615 INFO [RS:2;0f983e3e5be1:42617 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C42617%2C1733618776920.1733618777615 2024-12-08T00:46:17,617 INFO [RS:0;0f983e3e5be1:41367 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C41367%2C1733618776848, suffix=, logDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/WALs/0f983e3e5be1,41367,1733618776848, archiveDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/oldWALs, maxLogs=32 2024-12-08T00:46:17,617 INFO [RS:0;0f983e3e5be1:41367 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C41367%2C1733618776848.1733618777617 2024-12-08T00:46:17,627 INFO [RS:1;0f983e3e5be1:39157 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/WALs/0f983e3e5be1,39157,1733618776882/0f983e3e5be1%2C39157%2C1733618776882.1733618777614 2024-12-08T00:46:17,629 DEBUG [RS:1;0f983e3e5be1:39157 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34185:34185),(127.0.0.1/127.0.0.1:44893:44893),(127.0.0.1/127.0.0.1:43313:43313)] 2024-12-08T00:46:17,629 INFO [RS:2;0f983e3e5be1:42617 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/WALs/0f983e3e5be1,42617,1733618776920/0f983e3e5be1%2C42617%2C1733618776920.1733618777615 2024-12-08T00:46:17,631 DEBUG [RS:2;0f983e3e5be1:42617 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34185:34185),(127.0.0.1/127.0.0.1:44893:44893),(127.0.0.1/127.0.0.1:43313:43313)] 2024-12-08T00:46:17,632 INFO [RS:0;0f983e3e5be1:41367 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/WALs/0f983e3e5be1,41367,1733618776848/0f983e3e5be1%2C41367%2C1733618776848.1733618777617 2024-12-08T00:46:17,633 DEBUG [RS:0;0f983e3e5be1:41367 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34185:34185),(127.0.0.1/127.0.0.1:43313:43313),(127.0.0.1/127.0.0.1:44893:44893)] 2024-12-08T00:46:17,736 DEBUG [0f983e3e5be1:42509 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-08T00:46:17,737 DEBUG [0f983e3e5be1:42509 {}] balancer.BalancerClusterState(204): Hosts are {0f983e3e5be1=0} racks are {/default-rack=0} 2024-12-08T00:46:17,741 DEBUG [0f983e3e5be1:42509 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-08T00:46:17,741 DEBUG [0f983e3e5be1:42509 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-08T00:46:17,741 DEBUG [0f983e3e5be1:42509 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-08T00:46:17,741 DEBUG [0f983e3e5be1:42509 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-08T00:46:17,741 DEBUG [0f983e3e5be1:42509 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-08T00:46:17,741 DEBUG [0f983e3e5be1:42509 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-08T00:46:17,741 INFO [0f983e3e5be1:42509 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-08T00:46:17,741 INFO [0f983e3e5be1:42509 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-08T00:46:17,741 INFO [0f983e3e5be1:42509 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-08T00:46:17,741 DEBUG [0f983e3e5be1:42509 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-08T00:46:17,742 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0f983e3e5be1,39157,1733618776882 2024-12-08T00:46:17,744 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,39157,1733618776882, state=OPENING 2024-12-08T00:46:17,770 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T00:46:17,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:17,780 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T00:46:17,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:17,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:17,780 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,39157,1733618776882}] 2024-12-08T00:46:17,780 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:17,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:17,938 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T00:46:17,942 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50301, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T00:46:17,949 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T00:46:17,950 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T00:46:17,953 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0f983e3e5be1%2C39157%2C1733618776882.meta, suffix=.meta, logDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/WALs/0f983e3e5be1,39157,1733618776882, archiveDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/oldWALs, maxLogs=32 2024-12-08T00:46:17,953 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0f983e3e5be1%2C39157%2C1733618776882.meta.1733618777953.meta 2024-12-08T00:46:17,964 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/WALs/0f983e3e5be1,39157,1733618776882/0f983e3e5be1%2C39157%2C1733618776882.meta.1733618777953.meta 2024-12-08T00:46:17,965 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43313:43313),(127.0.0.1/127.0.0.1:44893:44893),(127.0.0.1/127.0.0.1:34185:34185)] 2024-12-08T00:46:17,969 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:46:17,969 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T00:46:17,969 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T00:46:17,970 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
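
The wal.AbstractFSWAL entries above, for the per-server WALs and for the .meta WAL here, all report "blocksize=256 MB, rollsize=128 MB, ... maxLogs=32". The roll size being exactly half the block size matches the usual rule that a WAL file is rolled at blocksize times a roll multiplier defaulting to 0.5; the property name below (hbase.regionserver.logroll.multiplier) is stated as an assumption rather than read from this log:

    public class WalRollSketch {
        public static void main(String[] args) {
            long blockSizeMb = 256;       // from the log
            double rollMultiplier = 0.5;  // assumed default hbase.regionserver.logroll.multiplier
            long rollSizeMb = (long) (blockSizeMb * rollMultiplier);
            System.out.println("rollsize=" + rollSizeMb + " MB"); // 128 MB, matches the log
        }
    }
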
2024-12-08T00:46:17,970 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T00:46:17,970 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:17,970 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T00:46:17,970 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T00:46:17,974 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T00:46:17,975 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T00:46:17,975 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:17,976 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:17,976 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T00:46:17,977 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T00:46:17,977 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:17,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:17,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T00:46:17,979 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T00:46:17,979 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:17,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T00:46:17,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T00:46:17,981 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T00:46:17,981 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:17,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
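
The compactions.CompactionConfiguration entries above repeat the same tuning for each hbase:meta column family: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, throttle point ~2.5 GB. These look like stock defaults; a hedged sketch that simply reads the corresponding properties, using the logged values as fallbacks. The key names below are believed to be the standard compaction settings but should be treated as assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Fallback values mirror the CompactionConfiguration line in the log.
            int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
            int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
            float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2F);
            float offPeak = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
            System.out.println(minFiles + " / " + maxFiles + " / " + ratio + " / " + offPeak);
        }
    }
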
2024-12-08T00:46:17,981 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T00:46:17,982 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740 2024-12-08T00:46:17,984 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740 2024-12-08T00:46:17,986 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T00:46:17,986 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T00:46:17,986 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T00:46:17,988 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T00:46:17,989 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65077264, jitterRate=-0.0302731990814209}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T00:46:17,989 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T00:46:17,990 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733618777970Writing region info on filesystem at 1733618777970Initializing all the Stores at 1733618777971 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618777971Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618777974 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618777974Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733618777974Cleaning up temporary data from old regions at 1733618777986 (+12 ms)Running coprocessor post-open hooks at 1733618777989 (+3 ms)Region opened successfully at 1733618777989 2024-12-08T00:46:17,991 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733618777937 2024-12-08T00:46:17,994 DEBUG [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T00:46:17,994 INFO [RS_OPEN_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T00:46:17,995 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0f983e3e5be1,39157,1733618776882 2024-12-08T00:46:17,997 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0f983e3e5be1,39157,1733618776882, state=OPEN 2024-12-08T00:46:18,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:46:18,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:46:18,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:46:18,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T00:46:18,012 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0f983e3e5be1,39157,1733618776882 2024-12-08T00:46:18,012 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:18,012 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:18,012 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:18,012 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T00:46:18,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T00:46:18,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0f983e3e5be1,39157,1733618776882 in 232 msec 2024-12-08T00:46:18,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T00:46:18,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 685 msec 2024-12-08T00:46:18,021 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T00:46:18,021 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T00:46:18,022 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:46:18,022 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,39157,1733618776882, seqNum=-1] 2024-12-08T00:46:18,022 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:46:18,024 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48053, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:46:18,031 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 756 msec 2024-12-08T00:46:18,032 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733618778032, completionTime=-1 2024-12-08T00:46:18,032 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-08T00:46:18,032 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-08T00:46:18,034 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-08T00:46:18,034 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733618838034 2024-12-08T00:46:18,034 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733618898034 2024-12-08T00:46:18,034 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-08T00:46:18,035 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42509,1733618776699-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:18,035 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42509,1733618776699-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:18,035 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42509,1733618776699-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:18,035 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0f983e3e5be1:42509, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:18,035 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:18,036 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:18,038 DEBUG [master/0f983e3e5be1:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T00:46:18,041 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.062sec 2024-12-08T00:46:18,041 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T00:46:18,041 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T00:46:18,041 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T00:46:18,041 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T00:46:18,041 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T00:46:18,042 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42509,1733618776699-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
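The InitMetaProcedure entries above show the master creating the 'default' and 'hbase' namespaces and then finishing its startup chores. A minimal sketch of how a client could confirm those namespaces exist, assuming the standard HBase Java Admin API and a hypothetical already-open Connection named conn (this code is illustrative and not part of the test run that produced this log):

    import java.io.IOException;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    final class NamespaceCheckSketch {
        // Lists the namespaces the master reported creating above ('default' and 'hbase').
        static void listNamespaces(Connection conn) throws IOException {
            try (Admin admin = conn.getAdmin()) {
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println(ns.getName());
                }
            }
        }
    }
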
2024-12-08T00:46:18,042 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42509,1733618776699-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T00:46:18,045 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10f5e381, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:46:18,045 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0f983e3e5be1,42509,-1 for getting cluster id 2024-12-08T00:46:18,045 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T00:46:18,045 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T00:46:18,045 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T00:46:18,046 INFO [master/0f983e3e5be1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0f983e3e5be1,42509,1733618776699-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T00:46:18,047 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cb67eef0-d80a-4034-9c12-4df4f1430ac1' 2024-12-08T00:46:18,048 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T00:46:18,048 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cb67eef0-d80a-4034-9c12-4df4f1430ac1" 2024-12-08T00:46:18,048 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4896dbb1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:46:18,049 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0f983e3e5be1,42509,-1] 2024-12-08T00:46:18,049 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T00:46:18,049 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:18,051 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45324, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T00:46:18,052 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70fbe73e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T00:46:18,053 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T00:46:18,054 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=0f983e3e5be1,39157,1733618776882, seqNum=-1] 2024-12-08T00:46:18,054 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T00:46:18,056 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34232, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T00:46:18,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0f983e3e5be1,42509,1733618776699 2024-12-08T00:46:18,059 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T00:46:18,060 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 0f983e3e5be1,42509,1733618776699 2024-12-08T00:46:18,060 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5d1cc787 2024-12-08T00:46:18,061 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T00:46:18,063 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45330, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T00:46:18,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42509 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T00:46:18,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42509 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-08T00:46:18,068 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T00:46:18,068 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:18,068 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42509 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-08T00:46:18,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42509 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:46:18,070 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T00:46:18,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741837_1013 (size=392) 
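The ClusterIdFetcher and ConnectionRegistry entries above trace a client connection being set up against the mini cluster: cluster id lookup, hbase:meta location fetch, then a MasterService stub. Roughly the client-side shape of that handshake, as a sketch that assumes the standard HBase Java client and treats the ZooKeeper quorum 127.0.0.1:58726 from this log as the registry endpoint (not taken from the test source):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class ConnectSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Point the client at the test cluster's ZooKeeper quorum seen in this log.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "58726");
            // createConnection performs the handshake logged above: cluster id fetch,
            // hbase:meta location lookup, and master stub creation.
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
            }
        }
    }
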
2024-12-08T00:46:18,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741837_1013 (size=392) 2024-12-08T00:46:18,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741837_1013 (size=392) 2024-12-08T00:46:18,082 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e1c6a0733dd237d94d427d22165b3d2a, NAME => 'TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1 2024-12-08T00:46:18,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741838_1014 (size=51) 2024-12-08T00:46:18,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741838_1014 (size=51) 2024-12-08T00:46:18,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741838_1014 (size=51) 2024-12-08T00:46:18,091 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:18,091 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing e1c6a0733dd237d94d427d22165b3d2a, disabling compactions & flushes 2024-12-08T00:46:18,091 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 2024-12-08T00:46:18,091 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 2024-12-08T00:46:18,091 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. after waiting 0 ms 2024-12-08T00:46:18,091 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 2024-12-08T00:46:18,091 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 
2024-12-08T00:46:18,091 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for e1c6a0733dd237d94d427d22165b3d2a: Waiting for close lock at 1733618778091Disabling compacts and flushes for region at 1733618778091Disabling writes for close at 1733618778091Writing region close event to WAL at 1733618778091Closed at 1733618778091 2024-12-08T00:46:18,093 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T00:46:18,093 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733618778093"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733618778093"}]},"ts":"1733618778093"} 2024-12-08T00:46:18,096 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-08T00:46:18,098 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T00:46:18,098 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733618778098"}]},"ts":"1733618778098"} 2024-12-08T00:46:18,101 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-08T00:46:18,101 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {0f983e3e5be1=0} racks are {/default-rack=0} 2024-12-08T00:46:18,102 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-08T00:46:18,102 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-08T00:46:18,102 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-08T00:46:18,102 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-08T00:46:18,102 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-08T00:46:18,102 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-08T00:46:18,102 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-08T00:46:18,102 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-08T00:46:18,103 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-08T00:46:18,103 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-08T00:46:18,103 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e1c6a0733dd237d94d427d22165b3d2a, ASSIGN}] 2024-12-08T00:46:18,105 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e1c6a0733dd237d94d427d22165b3d2a, ASSIGN 2024-12-08T00:46:18,107 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e1c6a0733dd237d94d427d22165b3d2a, ASSIGN; state=OFFLINE, location=0f983e3e5be1,39157,1733618776882; forceNewPlan=false, retain=false 2024-12-08T00:46:18,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42509 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:46:18,258 INFO [0f983e3e5be1:42509 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-08T00:46:18,258 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e1c6a0733dd237d94d427d22165b3d2a, regionState=OPENING, regionLocation=0f983e3e5be1,39157,1733618776882 2024-12-08T00:46:18,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e1c6a0733dd237d94d427d22165b3d2a, ASSIGN because future has completed 2024-12-08T00:46:18,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e1c6a0733dd237d94d427d22165b3d2a, server=0f983e3e5be1,39157,1733618776882}] 2024-12-08T00:46:18,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42509 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:46:18,434 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 
2024-12-08T00:46:18,434 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e1c6a0733dd237d94d427d22165b3d2a, NAME => 'TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a.', STARTKEY => '', ENDKEY => ''} 2024-12-08T00:46:18,435 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC e1c6a0733dd237d94d427d22165b3d2a 2024-12-08T00:46:18,435 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T00:46:18,435 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e1c6a0733dd237d94d427d22165b3d2a 2024-12-08T00:46:18,435 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e1c6a0733dd237d94d427d22165b3d2a 2024-12-08T00:46:18,438 INFO [StoreOpener-e1c6a0733dd237d94d427d22165b3d2a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e1c6a0733dd237d94d427d22165b3d2a 2024-12-08T00:46:18,441 INFO [StoreOpener-e1c6a0733dd237d94d427d22165b3d2a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e1c6a0733dd237d94d427d22165b3d2a columnFamilyName cf 2024-12-08T00:46:18,441 DEBUG [StoreOpener-e1c6a0733dd237d94d427d22165b3d2a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T00:46:18,441 INFO [StoreOpener-e1c6a0733dd237d94d427d22165b3d2a-1 {}] regionserver.HStore(327): Store=e1c6a0733dd237d94d427d22165b3d2a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T00:46:18,442 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e1c6a0733dd237d94d427d22165b3d2a 2024-12-08T00:46:18,442 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/default/TestHBaseWalOnEC/e1c6a0733dd237d94d427d22165b3d2a 2024-12-08T00:46:18,443 DEBUG 
[RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/default/TestHBaseWalOnEC/e1c6a0733dd237d94d427d22165b3d2a 2024-12-08T00:46:18,443 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e1c6a0733dd237d94d427d22165b3d2a 2024-12-08T00:46:18,443 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e1c6a0733dd237d94d427d22165b3d2a 2024-12-08T00:46:18,445 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e1c6a0733dd237d94d427d22165b3d2a 2024-12-08T00:46:18,448 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/default/TestHBaseWalOnEC/e1c6a0733dd237d94d427d22165b3d2a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T00:46:18,449 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e1c6a0733dd237d94d427d22165b3d2a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61418707, jitterRate=-0.0847899466753006}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T00:46:18,449 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e1c6a0733dd237d94d427d22165b3d2a 2024-12-08T00:46:18,449 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e1c6a0733dd237d94d427d22165b3d2a: Running coprocessor pre-open hook at 1733618778436Writing region info on filesystem at 1733618778436Initializing all the Stores at 1733618778437 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733618778438 (+1 ms)Cleaning up temporary data from old regions at 1733618778443 (+5 ms)Running coprocessor post-open hooks at 1733618778449 (+6 ms)Region opened successfully at 1733618778449 2024-12-08T00:46:18,451 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a., pid=6, masterSystemTime=1733618778422 2024-12-08T00:46:18,454 DEBUG [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 2024-12-08T00:46:18,454 INFO [RS_OPEN_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 
2024-12-08T00:46:18,455 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e1c6a0733dd237d94d427d22165b3d2a, regionState=OPEN, openSeqNum=2, regionLocation=0f983e3e5be1,39157,1733618776882 2024-12-08T00:46:18,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e1c6a0733dd237d94d427d22165b3d2a, server=0f983e3e5be1,39157,1733618776882 because future has completed 2024-12-08T00:46:18,463 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T00:46:18,463 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e1c6a0733dd237d94d427d22165b3d2a, server=0f983e3e5be1,39157,1733618776882 in 192 msec 2024-12-08T00:46:18,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T00:46:18,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e1c6a0733dd237d94d427d22165b3d2a, ASSIGN in 360 msec 2024-12-08T00:46:18,469 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T00:46:18,469 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733618778469"}]},"ts":"1733618778469"} 2024-12-08T00:46:18,472 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-08T00:46:18,474 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T00:46:18,477 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 410 msec 2024-12-08T00:46:18,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42509 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T00:46:18,699 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-08T00:46:18,699 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-08T00:46:18,699 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T00:46:18,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-08T00:46:18,702 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T00:46:18,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
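The pid=4 CreateTableProcedure above writes the filesystem layout, adds the region to hbase:meta, assigns it, and reports the CREATE operation completed. The request logged at HMaster$4(2454) corresponds to a descriptor with a single 'cf' family and REGION_REPLICATION = 1; a hedged sketch of issuing an equivalent request with the standard Java Admin API (the Connection parameter is hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    final class CreateTableSketch {
        // Builds a descriptor equivalent to the one logged for pid=4:
        // one 'cf' family with default settings and region replication 1.
        static void createTestTable(Connection conn) throws IOException {
            try (Admin admin = conn.getAdmin()) {
                admin.createTable(TableDescriptorBuilder
                        .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                        .setRegionReplication(1)
                        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                        .build());
            }
        }
    }
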
2024-12-08T00:46:18,705 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a., hostname=0f983e3e5be1,39157,1733618776882, seqNum=2] 2024-12-08T00:46:18,709 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42509 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-08T00:46:18,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42509 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-08T00:46:18,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42509 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T00:46:18,713 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-08T00:46:18,714 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T00:46:18,715 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T00:46:18,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42509 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T00:46:18,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39157 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-08T00:46:18,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 
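The AsyncNonMetaRegionLocator entry at the top of this block resolves row='row' of TestHBaseWalOnEC to region e1c6a0733dd237d94d427d22165b3d2a on 0f983e3e5be1,39157, and the flush that follows picks up a single cell keyed row/cf:cq. A sketch of the corresponding client calls (a location lookup plus a one-cell Put), assuming the standard Java client; the value bytes are hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class PutSketch {
        static void putOneCell(Connection conn) throws IOException {
            TableName tn = TableName.valueOf("TestHBaseWalOnEC");
            // Region location lookup, the client-side counterpart of the locator entry above.
            try (RegionLocator locator = conn.getRegionLocator(tn)) {
                HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row"));
                System.out.println(loc.getRegion().getEncodedName() + " @ " + loc.getServerName());
            }
            // One cell at row/cf:cq; this is what later shows up in the flushed HFile.
            try (Table table = conn.getTable(tn)) {
                table.put(new Put(Bytes.toBytes("row"))
                        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
            }
        }
    }
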
2024-12-08T00:46:18,873 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing e1c6a0733dd237d94d427d22165b3d2a 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-08T00:46:18,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/default/TestHBaseWalOnEC/e1c6a0733dd237d94d427d22165b3d2a/.tmp/cf/8b42f056a4604143903f0c29950a03f9 is 36, key is row/cf:cq/1733618778707/Put/seqid=0 2024-12-08T00:46:18,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741839_1015 (size=4787) 2024-12-08T00:46:18,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741839_1015 (size=4787) 2024-12-08T00:46:18,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741839_1015 (size=4787) 2024-12-08T00:46:18,900 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/default/TestHBaseWalOnEC/e1c6a0733dd237d94d427d22165b3d2a/.tmp/cf/8b42f056a4604143903f0c29950a03f9 2024-12-08T00:46:18,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/default/TestHBaseWalOnEC/e1c6a0733dd237d94d427d22165b3d2a/.tmp/cf/8b42f056a4604143903f0c29950a03f9 as hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/default/TestHBaseWalOnEC/e1c6a0733dd237d94d427d22165b3d2a/cf/8b42f056a4604143903f0c29950a03f9 2024-12-08T00:46:18,917 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/default/TestHBaseWalOnEC/e1c6a0733dd237d94d427d22165b3d2a/cf/8b42f056a4604143903f0c29950a03f9, entries=1, sequenceid=5, filesize=4.7 K 2024-12-08T00:46:18,918 INFO [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for e1c6a0733dd237d94d427d22165b3d2a in 45ms, sequenceid=5, compaction requested=false 2024-12-08T00:46:18,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for e1c6a0733dd237d94d427d22165b3d2a: 2024-12-08T00:46:18,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 
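Above, FlushRegionProcedure pid=8 writes the 32-byte memstore of e1c6a0733dd237d94d427d22165b3d2a to a .tmp HFile and commits it under cf/ as a ~4.7 K file at sequenceid=5. The flush was requested through the master (HMaster$22(4506) "flush TestHBaseWalOnEC" at 00:46:18,709); a sketch of that client-side request, assuming the standard Admin API and a hypothetical open Connection:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    final class FlushSketch {
        // Asks the master to flush every region of the table; as logged above, the master
        // runs a FlushTableProcedure with one FlushRegionProcedure per region.
        static void flushTable(Connection conn) throws IOException {
            try (Admin admin = conn.getAdmin()) {
                admin.flush(TableName.valueOf("TestHBaseWalOnEC"));
            }
        }
    }
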
2024-12-08T00:46:18,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0f983e3e5be1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-08T00:46:18,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42509 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-08T00:46:18,924 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-08T00:46:18,924 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 207 msec 2024-12-08T00:46:18,927 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 216 msec 2024-12-08T00:46:19,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42509 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T00:46:19,028 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-08T00:46:19,033 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T00:46:19,033 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T00:46:19,033 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:46:19,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:19,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:19,034 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T00:46:19,034 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T00:46:19,034 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=254442202, stopped=false 2024-12-08T00:46:19,034 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0f983e3e5be1,42509,1733618776699 2024-12-08T00:46:19,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:19,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:19,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:19,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T00:46:19,095 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:46:19,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:19,095 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:19,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:19,096 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T00:46:19,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:19,096 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:46:19,097 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:19,097 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,41367,1733618776848' ***** 2024-12-08T00:46:19,097 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:46:19,098 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,39157,1733618776882' ***** 2024-12-08T00:46:19,098 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:46:19,098 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:19,098 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0f983e3e5be1,42617,1733618776920' ***** 2024-12-08T00:46:19,098 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T00:46:19,098 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:46:19,098 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:46:19,098 INFO [RS:1;0f983e3e5be1:39157 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:46:19,099 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:19,099 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:19,099 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:46:19,099 INFO [RS:1;0f983e3e5be1:39157 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:46:19,099 INFO [RS:0;0f983e3e5be1:41367 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-08T00:46:19,098 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:46:19,099 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T00:46:19,099 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(3091): Received CLOSE for e1c6a0733dd237d94d427d22165b3d2a 2024-12-08T00:46:19,100 INFO [RS:0;0f983e3e5be1:41367 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:46:19,099 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T00:46:19,100 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T00:46:19,100 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,41367,1733618776848 2024-12-08T00:46:19,100 INFO [RS:2;0f983e3e5be1:42617 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T00:46:19,100 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,39157,1733618776882 2024-12-08T00:46:19,100 INFO [RS:2;0f983e3e5be1:42617 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T00:46:19,101 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:46:19,101 INFO [RS:1;0f983e3e5be1:39157 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;0f983e3e5be1:39157. 2024-12-08T00:46:19,101 DEBUG [RS:1;0f983e3e5be1:39157 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:46:19,101 DEBUG [RS:1;0f983e3e5be1:39157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:19,101 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T00:46:19,101 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-08T00:46:19,102 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T00:46:19,102 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T00:46:19,101 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.HRegionServer(959): stopping server 0f983e3e5be1,42617,1733618776920 2024-12-08T00:46:19,102 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:46:19,101 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e1c6a0733dd237d94d427d22165b3d2a, disabling compactions & flushes 2024-12-08T00:46:19,101 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:46:19,102 INFO [RS:2;0f983e3e5be1:42617 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;0f983e3e5be1:42617. 2024-12-08T00:46:19,102 INFO [RS:0;0f983e3e5be1:41367 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0f983e3e5be1:41367. 2024-12-08T00:46:19,102 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 2024-12-08T00:46:19,102 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T00:46:19,102 DEBUG [RS:2;0f983e3e5be1:42617 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:46:19,102 DEBUG [RS:2;0f983e3e5be1:42617 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:19,102 DEBUG [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(1325): Online Regions={e1c6a0733dd237d94d427d22165b3d2a=TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a., 1588230740=hbase:meta,,1.1588230740} 2024-12-08T00:46:19,102 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 
2024-12-08T00:46:19,102 DEBUG [RS:0;0f983e3e5be1:41367 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T00:46:19,102 DEBUG [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e1c6a0733dd237d94d427d22165b3d2a 2024-12-08T00:46:19,102 DEBUG [RS:0;0f983e3e5be1:41367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:19,102 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. after waiting 0 ms 2024-12-08T00:46:19,102 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,42617,1733618776920; all regions closed. 2024-12-08T00:46:19,102 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 2024-12-08T00:46:19,102 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,41367,1733618776848; all regions closed. 
2024-12-08T00:46:19,102 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T00:46:19,102 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T00:46:19,103 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T00:46:19,103 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T00:46:19,103 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T00:46:19,103 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,103 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,103 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-08T00:46:19,103 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,103 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,103 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,106 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,106 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,106 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,107 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,107 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741834_1010 (size=93) 2024-12-08T00:46:19,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741834_1010 (size=93) 2024-12-08T00:46:19,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741835_1011 (size=93) 2024-12-08T00:46:19,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741834_1010 (size=93) 2024-12-08T00:46:19,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741835_1011 (size=93) 2024-12-08T00:46:19,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741835_1011 (size=93) 2024-12-08T00:46:19,114 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/default/TestHBaseWalOnEC/e1c6a0733dd237d94d427d22165b3d2a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-08T00:46:19,115 INFO [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 
2024-12-08T00:46:19,115 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e1c6a0733dd237d94d427d22165b3d2a: Waiting for close lock at 1733618779101Running coprocessor pre-close hooks at 1733618779101Disabling compacts and flushes for region at 1733618779101Disabling writes for close at 1733618779102 (+1 ms)Writing region close event to WAL at 1733618779106 (+4 ms)Running coprocessor post-close hooks at 1733618779114 (+8 ms)Closed at 1733618779115 (+1 ms) 2024-12-08T00:46:19,115 DEBUG [RS:2;0f983e3e5be1:42617 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/oldWALs 2024-12-08T00:46:19,115 DEBUG [RS_CLOSE_REGION-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a. 2024-12-08T00:46:19,115 INFO [RS:2;0f983e3e5be1:42617 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C42617%2C1733618776920:(num 1733618777615) 2024-12-08T00:46:19,115 DEBUG [RS:2;0f983e3e5be1:42617 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:19,115 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:46:19,115 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:46:19,116 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.ChoreService(370): Chore service for: regionserver/0f983e3e5be1:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T00:46:19,116 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T00:46:19,116 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T00:46:19,116 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:46:19,116 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-08T00:46:19,116 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:46:19,116 INFO [RS:2;0f983e3e5be1:42617 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42617 2024-12-08T00:46:19,117 DEBUG [RS:0;0f983e3e5be1:41367 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/oldWALs 2024-12-08T00:46:19,117 INFO [RS:0;0f983e3e5be1:41367 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C41367%2C1733618776848:(num 1733618777617) 2024-12-08T00:46:19,117 DEBUG [RS:0;0f983e3e5be1:41367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:19,117 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:46:19,117 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:46:19,118 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.ChoreService(370): Chore service for: regionserver/0f983e3e5be1:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T00:46:19,118 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T00:46:19,118 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T00:46:19,118 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T00:46:19,118 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-08T00:46:19,118 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:46:19,118 INFO [RS:0;0f983e3e5be1:41367 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41367 2024-12-08T00:46:19,123 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/.tmp/info/faad202215004abb893260ab7bb2c337 is 153, key is TestHBaseWalOnEC,,1733618778063.e1c6a0733dd237d94d427d22165b3d2a./info:regioninfo/1733618778455/Put/seqid=0 2024-12-08T00:46:19,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,42617,1733618776920 2024-12-08T00:46:19,125 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:46:19,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:46:19,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741840_1016 (size=6637) 2024-12-08T00:46:19,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741840_1016 (size=6637) 2024-12-08T00:46:19,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741840_1016 (size=6637) 2024-12-08T00:46:19,130 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/.tmp/info/faad202215004abb893260ab7bb2c337 2024-12-08T00:46:19,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,41367,1733618776848 2024-12-08T00:46:19,134 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:46:19,134 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$377/0x00007fbea88f6b88@61e34840 rejected from java.util.concurrent.ThreadPoolExecutor@24e1ca18[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-08T00:46:19,142 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,42617,1733618776920] 2024-12-08T00:46:19,153 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/.tmp/ns/a682de84cb2b4ebba59f48357b584fba is 43, key is default/ns:d/1733618778024/Put/seqid=0 2024-12-08T00:46:19,159 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,42617,1733618776920 already deleted, retry=false 2024-12-08T00:46:19,159 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,42617,1733618776920 expired; onlineServers=2 2024-12-08T00:46:19,159 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,41367,1733618776848] 2024-12-08T00:46:19,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741841_1017 (size=5153) 2024-12-08T00:46:19,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741841_1017 (size=5153) 2024-12-08T00:46:19,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741841_1017 (size=5153) 2024-12-08T00:46:19,160 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/.tmp/ns/a682de84cb2b4ebba59f48357b584fba 2024-12-08T00:46:19,169 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,41367,1733618776848 already deleted, retry=false 2024-12-08T00:46:19,170 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,41367,1733618776848 expired; onlineServers=1 2024-12-08T00:46:19,172 INFO [regionserver/0f983e3e5be1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:46:19,178 INFO [regionserver/0f983e3e5be1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:46:19,178 INFO [regionserver/0f983e3e5be1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:46:19,182 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/.tmp/table/daaaaf95317f4ed48fc6a13d21bfc904 is 52, key is TestHBaseWalOnEC/table:state/1733618778469/Put/seqid=0 2024-12-08T00:46:19,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to 
blk_1073741842_1018 (size=5249) 2024-12-08T00:46:19,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741842_1018 (size=5249) 2024-12-08T00:46:19,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741842_1018 (size=5249) 2024-12-08T00:46:19,190 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/.tmp/table/daaaaf95317f4ed48fc6a13d21bfc904 2024-12-08T00:46:19,199 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/.tmp/info/faad202215004abb893260ab7bb2c337 as hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/info/faad202215004abb893260ab7bb2c337 2024-12-08T00:46:19,209 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/info/faad202215004abb893260ab7bb2c337, entries=10, sequenceid=11, filesize=6.5 K 2024-12-08T00:46:19,210 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/.tmp/ns/a682de84cb2b4ebba59f48357b584fba as hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/ns/a682de84cb2b4ebba59f48357b584fba 2024-12-08T00:46:19,218 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/ns/a682de84cb2b4ebba59f48357b584fba, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T00:46:19,220 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/.tmp/table/daaaaf95317f4ed48fc6a13d21bfc904 as hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/table/daaaaf95317f4ed48fc6a13d21bfc904 2024-12-08T00:46:19,229 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/table/daaaaf95317f4ed48fc6a13d21bfc904, entries=2, sequenceid=11, filesize=5.1 K 2024-12-08T00:46:19,231 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 128ms, sequenceid=11, compaction requested=false 2024-12-08T00:46:19,237 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] 
wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T00:46:19,238 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T00:46:19,238 INFO [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T00:46:19,238 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733618779102Running coprocessor pre-close hooks at 1733618779102Disabling compacts and flushes for region at 1733618779102Disabling writes for close at 1733618779103 (+1 ms)Obtaining lock to block concurrent updates at 1733618779103Preparing flush snapshotting stores in 1588230740 at 1733618779103Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733618779104 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733618779105 (+1 ms)Flushing 1588230740/info: creating writer at 1733618779105Flushing 1588230740/info: appending metadata at 1733618779123 (+18 ms)Flushing 1588230740/info: closing flushed file at 1733618779123Flushing 1588230740/ns: creating writer at 1733618779138 (+15 ms)Flushing 1588230740/ns: appending metadata at 1733618779152 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733618779152Flushing 1588230740/table: creating writer at 1733618779167 (+15 ms)Flushing 1588230740/table: appending metadata at 1733618779181 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733618779181Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2debe4a2: reopening flushed file at 1733618779198 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72366b78: reopening flushed file at 1733618779209 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f1a2268: reopening flushed file at 1733618779219 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 128ms, sequenceid=11, compaction requested=false at 1733618779231 (+12 ms)Writing region close event to WAL at 1733618779232 (+1 ms)Running coprocessor post-close hooks at 1733618779238 (+6 ms)Closed at 1733618779238 2024-12-08T00:46:19,238 DEBUG [RS_CLOSE_META-regionserver/0f983e3e5be1:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T00:46:19,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:19,242 INFO [RS:2;0f983e3e5be1:42617 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:46:19,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42617-0x10002f0f9030003, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:19,242 INFO [RS:2;0f983e3e5be1:42617 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,42617,1733618776920; zookeeper connection closed. 
2024-12-08T00:46:19,243 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@a72ddb7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@a72ddb7 2024-12-08T00:46:19,251 INFO [RS:0;0f983e3e5be1:41367 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:46:19,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:19,251 INFO [RS:0;0f983e3e5be1:41367 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,41367,1733618776848; zookeeper connection closed. 2024-12-08T00:46:19,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41367-0x10002f0f9030001, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:19,251 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@71f71f44 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@71f71f44 2024-12-08T00:46:19,291 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T00:46:19,291 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-08T00:46:19,302 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(976): stopping server 0f983e3e5be1,39157,1733618776882; all regions closed. 2024-12-08T00:46:19,303 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,303 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,303 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,303 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,304 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741836_1012 (size=2751) 2024-12-08T00:46:19,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741836_1012 (size=2751) 2024-12-08T00:46:19,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741836_1012 (size=2751) 2024-12-08T00:46:19,310 DEBUG [RS:1;0f983e3e5be1:39157 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/oldWALs 2024-12-08T00:46:19,310 INFO [RS:1;0f983e3e5be1:39157 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C39157%2C1733618776882.meta:.meta(num 1733618777953) 2024-12-08T00:46:19,310 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,310 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,310 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,311 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,311 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42107 is added to blk_1073741833_1009 (size=1298) 2024-12-08T00:46:19,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741833_1009 (size=1298) 2024-12-08T00:46:19,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741833_1009 (size=1298) 2024-12-08T00:46:19,535 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T00:46:19,535 INFO [regionserver/0f983e3e5be1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T00:46:19,723 DEBUG [RS:1;0f983e3e5be1:39157 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/oldWALs 2024-12-08T00:46:19,724 INFO [RS:1;0f983e3e5be1:39157 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0f983e3e5be1%2C39157%2C1733618776882:(num 1733618777614) 2024-12-08T00:46:19,724 DEBUG [RS:1;0f983e3e5be1:39157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T00:46:19,724 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T00:46:19,724 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:46:19,725 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.ChoreService(370): Chore service for: regionserver/0f983e3e5be1:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T00:46:19,725 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:46:19,725 INFO [regionserver/0f983e3e5be1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T00:46:19,726 INFO [RS:1;0f983e3e5be1:39157 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39157 2024-12-08T00:46:19,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T00:46:19,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0f983e3e5be1,39157,1733618776882 2024-12-08T00:46:19,786 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:46:19,795 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0f983e3e5be1,39157,1733618776882] 2024-12-08T00:46:19,803 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0f983e3e5be1,39157,1733618776882 already deleted, retry=false 2024-12-08T00:46:19,803 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0f983e3e5be1,39157,1733618776882 expired; onlineServers=0 2024-12-08T00:46:19,803 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0f983e3e5be1,42509,1733618776699' ***** 2024-12-08T00:46:19,803 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T00:46:19,804 INFO [M:0;0f983e3e5be1:42509 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T00:46:19,804 INFO [M:0;0f983e3e5be1:42509 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T00:46:19,804 DEBUG [M:0;0f983e3e5be1:42509 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T00:46:19,804 DEBUG [M:0;0f983e3e5be1:42509 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T00:46:19,804 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T00:46:19,804 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618777280 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.small.0-1733618777280,5,FailOnTimeoutGroup] 2024-12-08T00:46:19,804 DEBUG [master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618777280 {}] cleaner.HFileCleaner(306): Exit Thread[master/0f983e3e5be1:0:becomeActiveMaster-HFileCleaner.large.0-1733618777280,5,FailOnTimeoutGroup] 2024-12-08T00:46:19,805 INFO [M:0;0f983e3e5be1:42509 {}] hbase.ChoreService(370): Chore service for: master/0f983e3e5be1:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T00:46:19,805 INFO [M:0;0f983e3e5be1:42509 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T00:46:19,805 DEBUG [M:0;0f983e3e5be1:42509 {}] master.HMaster(1795): Stopping service threads 2024-12-08T00:46:19,805 INFO [M:0;0f983e3e5be1:42509 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T00:46:19,805 INFO [M:0;0f983e3e5be1:42509 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T00:46:19,805 INFO [M:0;0f983e3e5be1:42509 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T00:46:19,806 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T00:46:19,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T00:46:19,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T00:46:19,811 DEBUG [M:0;0f983e3e5be1:42509 {}] zookeeper.ZKUtil(347): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T00:46:19,811 WARN [M:0;0f983e3e5be1:42509 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T00:46:19,812 INFO [M:0;0f983e3e5be1:42509 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/.lastflushedseqids 2024-12-08T00:46:19,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741843_1019 (size=127) 2024-12-08T00:46:19,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741843_1019 (size=127) 2024-12-08T00:46:19,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741843_1019 (size=127) 2024-12-08T00:46:19,823 INFO [M:0;0f983e3e5be1:42509 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T00:46:19,823 INFO [M:0;0f983e3e5be1:42509 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T00:46:19,824 DEBUG 
[M:0;0f983e3e5be1:42509 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T00:46:19,824 INFO [M:0;0f983e3e5be1:42509 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:46:19,824 DEBUG [M:0;0f983e3e5be1:42509 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:46:19,824 DEBUG [M:0;0f983e3e5be1:42509 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T00:46:19,824 DEBUG [M:0;0f983e3e5be1:42509 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:46:19,824 INFO [M:0;0f983e3e5be1:42509 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-08T00:46:19,847 DEBUG [M:0;0f983e3e5be1:42509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a2d23d7463a6435b862b08d15f772a33 is 82, key is hbase:meta,,1/info:regioninfo/1733618777995/Put/seqid=0 2024-12-08T00:46:19,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741844_1020 (size=5672) 2024-12-08T00:46:19,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741844_1020 (size=5672) 2024-12-08T00:46:19,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741844_1020 (size=5672) 2024-12-08T00:46:19,855 INFO [M:0;0f983e3e5be1:42509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a2d23d7463a6435b862b08d15f772a33 2024-12-08T00:46:19,875 DEBUG [M:0;0f983e3e5be1:42509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/36ade08890d740f59b2ae6c112601918 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733618778476/Put/seqid=0 2024-12-08T00:46:19,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741845_1021 (size=6440) 2024-12-08T00:46:19,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741845_1021 (size=6440) 2024-12-08T00:46:19,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741845_1021 (size=6440) 2024-12-08T00:46:19,882 INFO [M:0;0f983e3e5be1:42509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/36ade08890d740f59b2ae6c112601918 2024-12-08T00:46:19,895 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:19,895 INFO [RS:1;0f983e3e5be1:39157 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:46:19,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39157-0x10002f0f9030002, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:19,895 INFO [RS:1;0f983e3e5be1:39157 {}] regionserver.HRegionServer(1031): Exiting; stopping=0f983e3e5be1,39157,1733618776882; zookeeper connection closed. 2024-12-08T00:46:19,895 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4c8aa3b6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4c8aa3b6 2024-12-08T00:46:19,896 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-08T00:46:19,904 DEBUG [M:0;0f983e3e5be1:42509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4eb467e47bf247e8951f7a906272d51f is 69, key is 0f983e3e5be1,39157,1733618776882/rs:state/1733618777414/Put/seqid=0 2024-12-08T00:46:19,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741846_1022 (size=5294) 2024-12-08T00:46:19,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741846_1022 (size=5294) 2024-12-08T00:46:19,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741846_1022 (size=5294) 2024-12-08T00:46:19,913 INFO [M:0;0f983e3e5be1:42509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4eb467e47bf247e8951f7a906272d51f 2024-12-08T00:46:19,919 DEBUG [M:0;0f983e3e5be1:42509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a2d23d7463a6435b862b08d15f772a33 as hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a2d23d7463a6435b862b08d15f772a33 2024-12-08T00:46:19,927 INFO [M:0;0f983e3e5be1:42509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a2d23d7463a6435b862b08d15f772a33, entries=8, sequenceid=72, filesize=5.5 K 2024-12-08T00:46:19,928 DEBUG [M:0;0f983e3e5be1:42509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/36ade08890d740f59b2ae6c112601918 as 
hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/36ade08890d740f59b2ae6c112601918 2024-12-08T00:46:19,935 INFO [M:0;0f983e3e5be1:42509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/36ade08890d740f59b2ae6c112601918, entries=8, sequenceid=72, filesize=6.3 K 2024-12-08T00:46:19,936 DEBUG [M:0;0f983e3e5be1:42509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4eb467e47bf247e8951f7a906272d51f as hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4eb467e47bf247e8951f7a906272d51f 2024-12-08T00:46:19,944 INFO [M:0;0f983e3e5be1:42509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40987/user/jenkins/test-data/ba186fac-90bc-c946-ca0b-016b833ef7f1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4eb467e47bf247e8951f7a906272d51f, entries=3, sequenceid=72, filesize=5.2 K 2024-12-08T00:46:19,946 INFO [M:0;0f983e3e5be1:42509 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=72, compaction requested=false 2024-12-08T00:46:19,947 INFO [M:0;0f983e3e5be1:42509 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T00:46:19,947 DEBUG [M:0;0f983e3e5be1:42509 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733618779824Disabling compacts and flushes for region at 1733618779824Disabling writes for close at 1733618779824Obtaining lock to block concurrent updates at 1733618779824Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733618779824Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733618779825 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733618779826 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733618779826Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733618779847 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733618779847Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733618779861 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733618779875 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733618779875Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733618779889 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733618779904 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733618779904Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7dce5c40: reopening flushed file at 1733618779918 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3acf67: reopening flushed file at 1733618779927 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5471bc23: reopening flushed file at 1733618779935 (+8 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=72, compaction requested=false at 1733618779946 (+11 ms)Writing region close event to WAL at 1733618779947 (+1 ms)Closed at 1733618779947 2024-12-08T00:46:19,947 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,947 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,947 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,947 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,947 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T00:46:19,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44495 is added to blk_1073741830_1006 (size=32683) 2024-12-08T00:46:19,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38147 is added to blk_1073741830_1006 (size=32683) 2024-12-08T00:46:19,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42107 is added to blk_1073741830_1006 (size=32683) 2024-12-08T00:46:19,951 INFO [M:0;0f983e3e5be1:42509 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T00:46:19,951 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T00:46:19,951 INFO [M:0;0f983e3e5be1:42509 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42509 2024-12-08T00:46:19,951 INFO [M:0;0f983e3e5be1:42509 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T00:46:20,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:20,078 INFO [M:0;0f983e3e5be1:42509 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T00:46:20,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x10002f0f9030000, quorum=127.0.0.1:58726, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T00:46:20,086 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6e89cb0b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:20,087 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b3c8c82{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:46:20,087 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:46:20,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c77de1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:46:20,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73f6422f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/hadoop.log.dir/,STOPPED} 2024-12-08T00:46:20,089 WARN [BP-999966035-172.17.0.2-1733618774799 heartbeating to localhost/127.0.0.1:40987 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:46:20,089 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:46:20,089 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:46:20,089 WARN [BP-999966035-172.17.0.2-1733618774799 heartbeating to localhost/127.0.0.1:40987 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-999966035-172.17.0.2-1733618774799 (Datanode Uuid 9f4c8d06-da19-45ff-8182-f77e68c06b76) service to localhost/127.0.0.1:40987 2024-12-08T00:46:20,090 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/data/data5/current/BP-999966035-172.17.0.2-1733618774799 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:46:20,090 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/data/data6/current/BP-999966035-172.17.0.2-1733618774799 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:46:20,090 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:46:20,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f8d2ee2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:20,092 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6beabb01{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:46:20,092 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:46:20,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e5afbc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:46:20,093 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c597470{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/hadoop.log.dir/,STOPPED} 2024-12-08T00:46:20,094 WARN [BP-999966035-172.17.0.2-1733618774799 heartbeating to localhost/127.0.0.1:40987 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:46:20,094 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:46:20,094 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:46:20,094 WARN [BP-999966035-172.17.0.2-1733618774799 heartbeating to localhost/127.0.0.1:40987 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-999966035-172.17.0.2-1733618774799 (Datanode Uuid 74131f4e-aee3-452e-aae2-b0f177768a54) service to localhost/127.0.0.1:40987 2024-12-08T00:46:20,094 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/data/data3/current/BP-999966035-172.17.0.2-1733618774799 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:46:20,094 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/data/data4/current/BP-999966035-172.17.0.2-1733618774799 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:46:20,094 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:46:20,096 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@700f39d7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T00:46:20,096 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e9ae4fc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:46:20,096 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:46:20,096 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61d23bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:46:20,096 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@137179d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/hadoop.log.dir/,STOPPED} 2024-12-08T00:46:20,097 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T00:46:20,097 WARN [BP-999966035-172.17.0.2-1733618774799 heartbeating to localhost/127.0.0.1:40987 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T00:46:20,097 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T00:46:20,097 WARN [BP-999966035-172.17.0.2-1733618774799 heartbeating to localhost/127.0.0.1:40987 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-999966035-172.17.0.2-1733618774799 (Datanode Uuid ad7a1ce7-eb1a-4d51-a229-8417d2ca59c6) service to localhost/127.0.0.1:40987 2024-12-08T00:46:20,098 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/data/data1/current/BP-999966035-172.17.0.2-1733618774799 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:46:20,098 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/cluster_df0b88a1-4eae-bae8-26c2-deef4205a8eb/data/data2/current/BP-999966035-172.17.0.2-1733618774799 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T00:46:20,098 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T00:46:20,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ffa125c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T00:46:20,103 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3aa18531{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T00:46:20,103 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T00:46:20,104 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16eaa68d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T00:46:20,104 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18f854cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9b239ae7-7a70-0b46-e541-fc72a85e4f85/hadoop.log.dir/,STOPPED} 2024-12-08T00:46:20,110 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T00:46:20,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T00:46:20,139 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=149 (was 90) - Thread LEAK? -, OpenFileDescriptor=518 (was 445) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=164 (was 179), ProcessCount=11 (was 11), AvailableMemoryMB=17671 (was 17829)