2024-11-23 15:44:54,937 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-23 15:44:54,954 main DEBUG Took 0.014064 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-23 15:44:54,954 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-23 15:44:54,954 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-23 15:44:54,956 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-23 15:44:54,957 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:44:54,982 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-23 15:44:54,999 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,004 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:44:55,005 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,005 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:44:55,006 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,006 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:44:55,008 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,008 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:44:55,009 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,009 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:44:55,010 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,011 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:44:55,012 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,012 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-23 15:44:55,013 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,013 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:44:55,014 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,015 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:44:55,015 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,016 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:44:55,017 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,017 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:44:55,018 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,019 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:44:55,020 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,020 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-23 15:44:55,022 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:44:55,024 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-23 15:44:55,027 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-23 15:44:55,028 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
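[Editor's note] The entries above show Log4j2 building per-package LoggerConfigs from the properties configuration in the test jar (for example org.apache.zookeeper at ERROR, org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG, and the root logger at INFO routed to the Console appender). As a minimal sketch only, and not how this run actually configures itself, the same levels could be applied programmatically through the Log4j2 Configurator API:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public final class LogLevelSketch {
      public static void main(String[] args) {
        // Illustrative only: in the run above these levels come from the
        // log4j2.properties bundled in hbase-logging-*-tests.jar; this is
        // the programmatic equivalent of those LoggerConfig builders.
        Configurator.setRootLevel(Level.INFO);                         // rootLogger = INFO,Console
        Configurator.setLevel("org.apache.hadoop", Level.WARN);        // quiet Hadoop internals
        Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG); // verbose HBase test output
        Configurator.setLevel("org.apache.zookeeper", Level.ERROR);    // suppress ZooKeeper noise
      }
    }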
2024-11-23 15:44:55,030 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-23 15:44:55,031 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-23 15:44:55,042 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-23 15:44:55,046 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-23 15:44:55,049 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-23 15:44:55,049 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-23 15:44:55,050 main DEBUG createAppenders(={Console}) 2024-11-23 15:44:55,051 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-23 15:44:55,051 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-23 15:44:55,052 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-23 15:44:55,053 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-23 15:44:55,053 main DEBUG OutputStream closed 2024-11-23 15:44:55,053 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-23 15:44:55,054 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-23 15:44:55,054 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-23 15:44:55,163 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-23 15:44:55,166 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-23 15:44:55,168 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-23 15:44:55,169 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-23 15:44:55,170 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-23 15:44:55,171 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-23 15:44:55,171 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-23 15:44:55,172 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-23 15:44:55,172 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-23 15:44:55,173 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-23 15:44:55,173 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-23 15:44:55,174 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-23 15:44:55,174 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-23 15:44:55,174 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-23 15:44:55,175 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-23 15:44:55,175 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-23 15:44:55,176 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-23 15:44:55,177 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-23 15:44:55,180 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-23 15:44:55,180 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-23 15:44:55,181 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-23 15:44:55,183 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-23T15:44:55,203 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-23 15:44:55,207 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-23 15:44:55,207 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
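[Editor's note] The HBaseClassTestRule line above announces the test class and its 26-minute timeout; the rule derives that timeout from the test's size category. Below is a minimal sketch of the usual HBase JUnit4 boilerplate that produces such a line. The category annotations are assumptions, not taken from the actual test source.

    import org.apache.hadoop.hbase.HBaseClassTestRule;
    import org.apache.hadoop.hbase.testclassification.MediumTests;
    import org.apache.hadoop.hbase.testclassification.RegionServerTests;
    import org.junit.ClassRule;
    import org.junit.experimental.categories.Category;

    // Sketch of standard HBase test-class boilerplate; the size category shown
    // here is an assumption, chosen only to illustrate how the timeout arises.
    @Category({ RegionServerTests.class, MediumTests.class })
    public class TestHBaseWalOnEC {
      @ClassRule
      public static final HBaseClassTestRule CLASS_RULE =
          HBaseClassTestRule.forClass(TestHBaseWalOnEC.class);

      // test methods such as testReadWrite() follow ...
    }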
2024-11-23T15:44:55,496 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30 2024-11-23T15:44:55,529 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b, deleteOnExit=true 2024-11-23T15:44:55,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/test.cache.data in system properties and HBase conf 2024-11-23T15:44:55,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T15:44:55,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/hadoop.log.dir in system properties and HBase conf 2024-11-23T15:44:55,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T15:44:55,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T15:44:55,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T15:44:55,647 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-23T15:44:55,738 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-23T15:44:55,743 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T15:44:55,744 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T15:44:55,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T15:44:55,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T15:44:55,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T15:44:55,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T15:44:55,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T15:44:55,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T15:44:55,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T15:44:55,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/nfs.dump.dir in system properties and HBase conf 2024-11-23T15:44:55,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/java.io.tmpdir in system properties and HBase conf 2024-11-23T15:44:55,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T15:44:55,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T15:44:55,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T15:44:56,993 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-23T15:44:57,067 INFO [Time-limited test {}] log.Log(170): Logging initialized @2908ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-23T15:44:57,133 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T15:44:57,190 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T15:44:57,209 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T15:44:57,209 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T15:44:57,210 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T15:44:57,222 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T15:44:57,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f7d122b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/hadoop.log.dir/,AVAILABLE} 2024-11-23T15:44:57,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@189e1f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T15:44:57,417 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2a105fbd{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/java.io.tmpdir/jetty-localhost-34445-hadoop-hdfs-3_4_1-tests_jar-_-any-18260538203064917640/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T15:44:57,434 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d863f4f{HTTP/1.1, (http/1.1)}{localhost:34445} 2024-11-23T15:44:57,434 INFO [Time-limited test {}] server.Server(415): Started @3276ms 2024-11-23T15:44:57,972 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T15:44:57,980 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T15:44:57,981 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T15:44:57,981 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T15:44:57,981 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T15:44:57,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f592c6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/hadoop.log.dir/,AVAILABLE} 2024-11-23T15:44:57,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dc7b16b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T15:44:58,080 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1afaf33c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/java.io.tmpdir/jetty-localhost-45891-hadoop-hdfs-3_4_1-tests_jar-_-any-14646456201476251125/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T15:44:58,080 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@70d6e3ac{HTTP/1.1, (http/1.1)}{localhost:45891} 2024-11-23T15:44:58,081 INFO [Time-limited test {}] server.Server(415): Started @3922ms 2024-11-23T15:44:58,128 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T15:44:58,229 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T15:44:58,236 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T15:44:58,239 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T15:44:58,239 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T15:44:58,240 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T15:44:58,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@630da62d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/hadoop.log.dir/,AVAILABLE} 2024-11-23T15:44:58,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56b22eb1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T15:44:58,380 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@61dd7f65{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/java.io.tmpdir/jetty-localhost-37193-hadoop-hdfs-3_4_1-tests_jar-_-any-1868130172430942127/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T15:44:58,381 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d12fc1e{HTTP/1.1, (http/1.1)}{localhost:37193} 2024-11-23T15:44:58,381 INFO [Time-limited test {}] server.Server(415): Started @4222ms 2024-11-23T15:44:58,384 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T15:44:58,440 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T15:44:58,447 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T15:44:58,454 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T15:44:58,454 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T15:44:58,455 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T15:44:58,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5dc716dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/hadoop.log.dir/,AVAILABLE} 2024-11-23T15:44:58,457 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@403b9e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T15:44:58,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b1d6f1c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/java.io.tmpdir/jetty-localhost-45685-hadoop-hdfs-3_4_1-tests_jar-_-any-894422934703369445/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T15:44:58,564 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fd89fa9{HTTP/1.1, (http/1.1)}{localhost:45685} 2024-11-23T15:44:58,564 INFO [Time-limited test {}] server.Server(415): Started @4406ms 2024-11-23T15:44:58,567 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T15:44:59,338 WARN [Thread-120 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/data/data1/current/BP-672935730-172.17.0.2-1732376696384/current, will proceed with Du for space computation calculation, 2024-11-23T15:44:59,338 WARN [Thread-121 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/data/data2/current/BP-672935730-172.17.0.2-1732376696384/current, will proceed with Du for space computation calculation, 2024-11-23T15:44:59,379 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T15:44:59,395 WARN [Thread-132 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/data/data4/current/BP-672935730-172.17.0.2-1732376696384/current, will proceed with Du for space computation calculation, 2024-11-23T15:44:59,399 WARN [Thread-131 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/data/data3/current/BP-672935730-172.17.0.2-1732376696384/current, will proceed with Du for space computation calculation, 2024-11-23T15:44:59,428 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T15:44:59,450 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xff4c0c14df3fda2d with lease ID 0x2eaf06a4b086825f: Processing first storage report for DS-0d3025cd-3ee6-4429-861f-637602cae18e from datanode DatanodeRegistration(127.0.0.1:45071, datanodeUuid=6b35f311-3109-4dc8-8ae5-a1a17f59c404, infoPort=42603, infoSecurePort=0, ipcPort=39439, storageInfo=lv=-57;cid=testClusterID;nsid=342727408;c=1732376696384) 2024-11-23T15:44:59,451 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xff4c0c14df3fda2d with lease ID 0x2eaf06a4b086825f: from storage DS-0d3025cd-3ee6-4429-861f-637602cae18e node DatanodeRegistration(127.0.0.1:45071, datanodeUuid=6b35f311-3109-4dc8-8ae5-a1a17f59c404, infoPort=42603, infoSecurePort=0, ipcPort=39439, storageInfo=lv=-57;cid=testClusterID;nsid=342727408;c=1732376696384), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T15:44:59,452 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83d52905a067945 with lease ID 0x2eaf06a4b0868260: Processing first storage report for DS-0903c522-593e-4a8d-bed1-b54c0bf2b73b from datanode DatanodeRegistration(127.0.0.1:43305, datanodeUuid=83b82bbc-f283-4239-9c47-b4d0e5073c91, infoPort=33133, infoSecurePort=0, ipcPort=36395, storageInfo=lv=-57;cid=testClusterID;nsid=342727408;c=1732376696384) 2024-11-23T15:44:59,452 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83d52905a067945 with lease ID 0x2eaf06a4b0868260: from storage DS-0903c522-593e-4a8d-bed1-b54c0bf2b73b node DatanodeRegistration(127.0.0.1:43305, datanodeUuid=83b82bbc-f283-4239-9c47-b4d0e5073c91, infoPort=33133, infoSecurePort=0, ipcPort=36395, storageInfo=lv=-57;cid=testClusterID;nsid=342727408;c=1732376696384), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T15:44:59,453 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83d52905a067945 with lease ID 0x2eaf06a4b0868260: Processing first storage report for DS-26d20f44-5c84-4e02-81a6-cd4ee5e42234 from datanode DatanodeRegistration(127.0.0.1:43305, datanodeUuid=83b82bbc-f283-4239-9c47-b4d0e5073c91, infoPort=33133, infoSecurePort=0, ipcPort=36395, storageInfo=lv=-57;cid=testClusterID;nsid=342727408;c=1732376696384) 2024-11-23T15:44:59,453 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83d52905a067945 with 
lease ID 0x2eaf06a4b0868260: from storage DS-26d20f44-5c84-4e02-81a6-cd4ee5e42234 node DatanodeRegistration(127.0.0.1:43305, datanodeUuid=83b82bbc-f283-4239-9c47-b4d0e5073c91, infoPort=33133, infoSecurePort=0, ipcPort=36395, storageInfo=lv=-57;cid=testClusterID;nsid=342727408;c=1732376696384), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T15:44:59,453 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xff4c0c14df3fda2d with lease ID 0x2eaf06a4b086825f: Processing first storage report for DS-b67cfe1c-d433-402e-a7a7-f1fcd46e6e56 from datanode DatanodeRegistration(127.0.0.1:45071, datanodeUuid=6b35f311-3109-4dc8-8ae5-a1a17f59c404, infoPort=42603, infoSecurePort=0, ipcPort=39439, storageInfo=lv=-57;cid=testClusterID;nsid=342727408;c=1732376696384) 2024-11-23T15:44:59,453 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xff4c0c14df3fda2d with lease ID 0x2eaf06a4b086825f: from storage DS-b67cfe1c-d433-402e-a7a7-f1fcd46e6e56 node DatanodeRegistration(127.0.0.1:45071, datanodeUuid=6b35f311-3109-4dc8-8ae5-a1a17f59c404, infoPort=42603, infoSecurePort=0, ipcPort=39439, storageInfo=lv=-57;cid=testClusterID;nsid=342727408;c=1732376696384), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T15:44:59,462 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/data/data5/current/BP-672935730-172.17.0.2-1732376696384/current, will proceed with Du for space computation calculation, 2024-11-23T15:44:59,464 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/data/data6/current/BP-672935730-172.17.0.2-1732376696384/current, will proceed with Du for space computation calculation, 2024-11-23T15:44:59,487 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T15:44:59,492 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64badc4d0d1095e0 with lease ID 0x2eaf06a4b0868261: Processing first storage report for DS-574b886e-69b8-4f9e-89d1-eb2ad13216ee from datanode DatanodeRegistration(127.0.0.1:46329, datanodeUuid=289915e3-ee6c-43b0-81da-14b3cbffffb5, infoPort=44331, infoSecurePort=0, ipcPort=43065, storageInfo=lv=-57;cid=testClusterID;nsid=342727408;c=1732376696384) 2024-11-23T15:44:59,493 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64badc4d0d1095e0 with lease ID 0x2eaf06a4b0868261: from storage DS-574b886e-69b8-4f9e-89d1-eb2ad13216ee node DatanodeRegistration(127.0.0.1:46329, datanodeUuid=289915e3-ee6c-43b0-81da-14b3cbffffb5, infoPort=44331, infoSecurePort=0, ipcPort=43065, storageInfo=lv=-57;cid=testClusterID;nsid=342727408;c=1732376696384), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T15:44:59,493 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64badc4d0d1095e0 with lease ID 0x2eaf06a4b0868261: Processing first storage report for DS-e265d622-7da0-4ecd-b98c-0444a1475acc from datanode DatanodeRegistration(127.0.0.1:46329, datanodeUuid=289915e3-ee6c-43b0-81da-14b3cbffffb5, infoPort=44331, infoSecurePort=0, ipcPort=43065, storageInfo=lv=-57;cid=testClusterID;nsid=342727408;c=1732376696384) 2024-11-23T15:44:59,493 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64badc4d0d1095e0 with lease ID 0x2eaf06a4b0868261: from storage DS-e265d622-7da0-4ecd-b98c-0444a1475acc node DatanodeRegistration(127.0.0.1:46329, datanodeUuid=289915e3-ee6c-43b0-81da-14b3cbffffb5, infoPort=44331, infoSecurePort=0, ipcPort=43065, storageInfo=lv=-57;cid=testClusterID;nsid=342727408;c=1732376696384), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T15:44:59,565 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30 2024-11-23T15:44:59,648 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-11-23T15:44:59,719 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=156, OpenFileDescriptor=393, MaxFileDescriptor=1048576, SystemLoadAverage=309, ProcessCount=11, AvailableMemoryMB=8631 2024-11-23T15:44:59,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T15:44:59,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-11-23T15:44:59,840 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/zookeeper_0, clientPort=64492, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T15:44:59,851 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64492 2024-11-23T15:44:59,863 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:44:59,866 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:44:59,995 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:44:59,996 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:00,056 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648790401_22 at /127.0.0.1:39380 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:46329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39380 dst: /127.0.0.1:46329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:00,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_-9223372036854775792_1002 (size=7) 2024-11-23T15:45:00,486 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-23T15:45:00,497 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6 with version=8 2024-11-23T15:45:00,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/hbase-staging 2024-11-23T15:45:00,606 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-23T15:45:00,897 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b712f9af2c12:0 server-side Connection retries=45 2024-11-23T15:45:00,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:00,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:00,913 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T15:45:00,914 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:00,915 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T15:45:01,095 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T15:45:01,179 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-23T15:45:01,190 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-23T15:45:01,194 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T15:45:01,225 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 15123 (auto-detected) 2024-11-23T15:45:01,226 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-23T15:45:01,245 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34855 2024-11-23T15:45:01,271 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34855 connecting to ZooKeeper ensemble=127.0.0.1:64492 2024-11-23T15:45:01,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348550x0, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T15:45:01,401 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34855-0x10169a8366e0000 connected 2024-11-23T15:45:01,509 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:01,513 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:01,529 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:01,534 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6, hbase.cluster.distributed=false 2024-11-23T15:45:01,570 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T15:45:01,583 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34855 2024-11-23T15:45:01,584 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34855 2024-11-23T15:45:01,584 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34855 2024-11-23T15:45:01,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34855 2024-11-23T15:45:01,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34855 2024-11-23T15:45:01,725 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b712f9af2c12:0 server-side Connection retries=45 2024-11-23T15:45:01,727 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:01,727 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:01,727 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T15:45:01,727 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:01,728 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T15:45:01,730 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T15:45:01,733 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T15:45:01,734 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34219 2024-11-23T15:45:01,736 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34219 connecting to ZooKeeper ensemble=127.0.0.1:64492 2024-11-23T15:45:01,737 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:01,745 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:01,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:342190x0, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T15:45:01,774 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:342190x0, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:01,775 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34219-0x10169a8366e0001 connected 2024-11-23T15:45:01,780 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T15:45:01,791 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T15:45:01,794 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T15:45:01,802 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T15:45:01,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34219 2024-11-23T15:45:01,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34219 
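[Editor's note] Earlier in the log (15:44:59,722) the harness reports "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, ..., numRegionServers=3, ..., numDataNodes=3, numZkServers=1, ...}". The sketch below shows how a test would typically drive that setup; the class and option names are taken from the log, the surrounding code is illustrative only (this particular run additionally reports "NOT STARTING DFS", i.e. it reuses the already-started mini DFS).

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    // Sketch only: start a mini cluster matching the StartMiniClusterOption
    // logged above (1 master, 3 region servers, 3 datanodes).
    public final class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster(StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)
            .build());
        try {
          // ... exercise the WAL against the erasure-coded root dir here ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }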
2024-11-23T15:45:01,806 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34219 2024-11-23T15:45:01,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34219 2024-11-23T15:45:01,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34219 2024-11-23T15:45:01,835 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b712f9af2c12:0 server-side Connection retries=45 2024-11-23T15:45:01,835 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:01,837 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:01,838 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T15:45:01,839 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:01,839 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T15:45:01,839 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T15:45:01,840 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T15:45:01,841 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45337 2024-11-23T15:45:01,843 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45337 connecting to ZooKeeper ensemble=127.0.0.1:64492 2024-11-23T15:45:01,844 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:01,848 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:01,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:453370x0, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T15:45:01,916 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:453370x0, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:01,916 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45337-0x10169a8366e0002 connected 2024-11-23T15:45:01,916 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 
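[Editor's note] The DFSStripedOutputStream warnings and the DataXceiver "Premature EOF" error earlier in the log come from writing under the RS-3-2-1024k erasure coding policy on a cluster with only three datanodes: each block group needs five distinct locations (three data plus two parity), so the parity blocks at indices 3 and 4 cannot be placed, and 'hdfs ec -verifyClusterSetup' would report the same topology shortfall. A hedged sketch of how a test would typically put a directory on that policy is shown below; the helper class is ours, the HDFS API calls are standard.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Sketch under assumptions (helper name is ours): apply the RS-3-2-1024k
    // policy named in the warnings above to a test directory.
    public final class EcSetupSketch {
      static void applyPolicy(DistributedFileSystem dfs, Path dir) throws Exception {
        dfs.enableErasureCodingPolicy("RS-3-2-1024k");    // make the policy usable on this cluster
        dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");  // new files under dir are written striped
      }
    }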
2024-11-23T15:45:01,917 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T15:45:01,918 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T15:45:01,920 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T15:45:01,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45337 2024-11-23T15:45:01,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45337 2024-11-23T15:45:01,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45337 2024-11-23T15:45:01,924 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45337 2024-11-23T15:45:01,925 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45337 2024-11-23T15:45:01,951 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b712f9af2c12:0 server-side Connection retries=45 2024-11-23T15:45:01,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:01,952 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:01,952 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T15:45:01,952 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:01,952 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T15:45:01,952 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T15:45:01,953 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T15:45:01,959 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36473 2024-11-23T15:45:01,962 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36473 connecting to ZooKeeper ensemble=127.0.0.1:64492 2024-11-23T15:45:01,963 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:01,967 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:01,993 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:364730x0, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T15:45:01,994 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36473-0x10169a8366e0003 connected 2024-11-23T15:45:01,995 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:01,995 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T15:45:01,998 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T15:45:01,999 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T15:45:02,001 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T15:45:02,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36473 2024-11-23T15:45:02,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36473 2024-11-23T15:45:02,003 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36473 2024-11-23T15:45:02,003 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36473 2024-11-23T15:45:02,003 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36473 2024-11-23T15:45:02,019 DEBUG [M:0;b712f9af2c12:34855 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b712f9af2c12:34855 2024-11-23T15:45:02,020 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b712f9af2c12,34855,1732376700708 2024-11-23T15:45:02,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:02,035 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:02,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-11-23T15:45:02,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:02,038 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b712f9af2c12,34855,1732376700708 2024-11-23T15:45:02,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T15:45:02,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T15:45:02,068 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T15:45:02,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:02,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:02,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:02,068 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:02,071 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T15:45:02,073 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b712f9af2c12,34855,1732376700708 from backup master directory 2024-11-23T15:45:02,085 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:02,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b712f9af2c12,34855,1732376700708 2024-11-23T15:45:02,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-23T15:45:02,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:02,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:02,086 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T15:45:02,087 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b712f9af2c12,34855,1732376700708 2024-11-23T15:45:02,088 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-23T15:45:02,089 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-23T15:45:02,161 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/hbase.id] with ID: a40f148a-4c7f-4ecd-a664-1fc76cb54180 2024-11-23T15:45:02,161 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/.tmp/hbase.id 2024-11-23T15:45:02,168 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:02,168 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:02,176 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648790401_22 at /127.0.0.1:53398 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:46329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53398 dst: /127.0.0.1:46329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:02,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-23T15:45:02,184 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-23T15:45:02,184 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/.tmp/hbase.id]:[hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/hbase.id] 2024-11-23T15:45:02,242 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:02,251 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T15:45:02,278 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 22ms. 2024-11-23T15:45:02,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:02,306 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:02,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:02,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:02,321 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:02,321 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:02,328 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648790401_22 at /127.0.0.1:53420 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:46329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53420 dst: /127.0.0.1:46329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:02,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-23T15:45:02,338 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-23T15:45:02,360 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T15:45:02,362 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T15:45:02,369 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T15:45:02,406 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:02,406 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:02,413 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648790401_22 at /127.0.0.1:53426 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:46329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53426 dst: /127.0.0.1:46329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:02,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-23T15:45:02,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43305 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-23T15:45:02,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45071 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-23T15:45:02,830 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-23T15:45:02,846 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store 2024-11-23T15:45:02,870 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:02,870 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:02,875 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648790401_22 at /127.0.0.1:53458 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53458 dst: /127.0.0.1:46329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:02,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-23T15:45:02,881 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-23T15:45:02,885 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-23T15:45:02,888 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:45:02,889 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T15:45:02,889 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T15:45:02,889 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:45:02,890 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T15:45:02,891 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:45:02,891 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:45:02,892 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732376702889Disabling compacts and flushes for region at 1732376702889Disabling writes for close at 1732376702891 (+2 ms)Writing region close event to WAL at 1732376702891Closed at 1732376702891 2024-11-23T15:45:02,894 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/.initializing 2024-11-23T15:45:02,894 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/WALs/b712f9af2c12,34855,1732376700708 2024-11-23T15:45:02,903 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T15:45:02,918 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b712f9af2c12%2C34855%2C1732376700708, suffix=, logDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/WALs/b712f9af2c12,34855,1732376700708, archiveDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/oldWALs, maxLogs=10 2024-11-23T15:45:02,950 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/WALs/b712f9af2c12,34855,1732376700708/b712f9af2c12%2C34855%2C1732376700708.1732376702923, exclude list is [], retry=0 2024-11-23T15:45:02,971 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:02,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43305,DS-0903c522-593e-4a8d-bed1-b54c0bf2b73b,DISK] 2024-11-23T15:45:02,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46329,DS-574b886e-69b8-4f9e-89d1-eb2ad13216ee,DISK] 2024-11-23T15:45:02,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45071,DS-0d3025cd-3ee6-4429-861f-637602cae18e,DISK] 2024-11-23T15:45:02,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-23T15:45:03,014 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/WALs/b712f9af2c12,34855,1732376700708/b712f9af2c12%2C34855%2C1732376700708.1732376702923 2024-11-23T15:45:03,015 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44331:44331),(127.0.0.1/127.0.0.1:33133:33133),(127.0.0.1/127.0.0.1:42603:42603)] 2024-11-23T15:45:03,016 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:45:03,016 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:45:03,020 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:03,021 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:03,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:03,084 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T15:45:03,089 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:03,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:03,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:03,096 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T15:45:03,096 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:03,097 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:45:03,097 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:03,100 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T15:45:03,100 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:03,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:45:03,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:03,104 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T15:45:03,104 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:03,106 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:45:03,106 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:03,110 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:03,112 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:03,117 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:03,118 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:03,122 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T15:45:03,125 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:03,137 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:45:03,138 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62474803, jitterRate=-0.06905288994312286}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T15:45:03,148 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732376703035Initializing all the Stores at 1732376703037 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376703037Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732376703038 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732376703038Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732376703038Cleaning up temporary data from old regions at 1732376703118 (+80 ms)Region opened successfully at 1732376703147 (+29 ms) 2024-11-23T15:45:03,149 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T15:45:03,185 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@360c6047, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b712f9af2c12/172.17.0.2:0 2024-11-23T15:45:03,219 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T15:45:03,232 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T15:45:03,232 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T15:45:03,236 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T15:45:03,238 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-23T15:45:03,244 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-23T15:45:03,244 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T15:45:03,280 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T15:45:03,291 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T15:45:03,351 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T15:45:03,354 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T15:45:03,356 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T15:45:03,368 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T15:45:03,371 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T15:45:03,375 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T15:45:03,384 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T15:45:03,390 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T15:45:03,401 DEBUG [master/b712f9af2c12:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T15:45:03,422 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T15:45:03,433 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T15:45:03,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:03,448 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:03,449 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:03,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:03,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:03,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:03,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:03,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:03,454 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b712f9af2c12,34855,1732376700708, sessionid=0x10169a8366e0000, setting cluster-up flag (Was=false) 2024-11-23T15:45:03,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:03,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:03,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-23T15:45:03,509 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:03,534 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T15:45:03,537 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b712f9af2c12,34855,1732376700708 2024-11-23T15:45:03,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:03,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:03,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:03,562 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:03,585 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T15:45:03,587 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b712f9af2c12,34855,1732376700708 2024-11-23T15:45:03,594 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T15:45:03,608 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(746): ClusterId : a40f148a-4c7f-4ecd-a664-1fc76cb54180 2024-11-23T15:45:03,609 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(746): ClusterId : a40f148a-4c7f-4ecd-a664-1fc76cb54180 2024-11-23T15:45:03,611 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T15:45:03,611 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(746): ClusterId : a40f148a-4c7f-4ecd-a664-1fc76cb54180 2024-11-23T15:45:03,611 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T15:45:03,611 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T15:45:03,633 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T15:45:03,633 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
flush-table-proc initialized 2024-11-23T15:45:03,633 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T15:45:03,634 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T15:45:03,634 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T15:45:03,634 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T15:45:03,646 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T15:45:03,646 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T15:45:03,646 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T15:45:03,646 DEBUG [RS:0;b712f9af2c12:34219 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21e070c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b712f9af2c12/172.17.0.2:0 2024-11-23T15:45:03,646 DEBUG [RS:1;b712f9af2c12:45337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2331431d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b712f9af2c12/172.17.0.2:0 2024-11-23T15:45:03,646 DEBUG [RS:2;b712f9af2c12:36473 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50ec1353, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b712f9af2c12/172.17.0.2:0 2024-11-23T15:45:03,668 DEBUG [RS:2;b712f9af2c12:36473 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;b712f9af2c12:36473 2024-11-23T15:45:03,668 DEBUG [RS:1;b712f9af2c12:45337 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b712f9af2c12:45337 2024-11-23T15:45:03,671 DEBUG [RS:0;b712f9af2c12:34219 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b712f9af2c12:34219 2024-11-23T15:45:03,672 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T15:45:03,672 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T15:45:03,672 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T15:45:03,672 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T15:45:03,672 DEBUG [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T15:45:03,672 DEBUG [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T15:45:03,673 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T15:45:03,673 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T15:45:03,673 DEBUG [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T15:45:03,675 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(2659): reportForDuty to master=b712f9af2c12,34855,1732376700708 with port=45337, startcode=1732376701834 2024-11-23T15:45:03,675 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(2659): reportForDuty to master=b712f9af2c12,34855,1732376700708 with port=34219, startcode=1732376701684 2024-11-23T15:45:03,675 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(2659): reportForDuty to master=b712f9af2c12,34855,1732376700708 with port=36473, startcode=1732376701950 2024-11-23T15:45:03,687 DEBUG [RS:1;b712f9af2c12:45337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T15:45:03,687 DEBUG [RS:2;b712f9af2c12:36473 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T15:45:03,687 DEBUG [RS:0;b712f9af2c12:34219 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T15:45:03,697 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T15:45:03,707 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T15:45:03,715 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-23T15:45:03,721 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b712f9af2c12,34855,1732376700708 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T15:45:03,728 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55375, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T15:45:03,728 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52293, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T15:45:03,735 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34855 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-23T15:45:03,735 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57807, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T15:45:03,741 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b712f9af2c12:0, corePoolSize=5, maxPoolSize=5 2024-11-23T15:45:03,742 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b712f9af2c12:0, corePoolSize=5, maxPoolSize=5 2024-11-23T15:45:03,742 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b712f9af2c12:0, corePoolSize=5, maxPoolSize=5 2024-11-23T15:45:03,742 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b712f9af2c12:0, corePoolSize=5, maxPoolSize=5 2024-11-23T15:45:03,742 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b712f9af2c12:0, corePoolSize=10, maxPoolSize=10 2024-11-23T15:45:03,742 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:03,743 
DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b712f9af2c12:0, corePoolSize=2, maxPoolSize=2 2024-11-23T15:45:03,743 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:03,743 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34855 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-23T15:45:03,744 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34855 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-23T15:45:03,754 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T15:45:03,754 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T15:45:03,762 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:03,764 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T15:45:03,772 DEBUG [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-23T15:45:03,772 DEBUG [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-23T15:45:03,772 DEBUG [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-23T15:45:03,773 WARN [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-23T15:45:03,773 WARN [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-11-23T15:45:03,773 WARN [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-23T15:45:03,781 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732376733781 2024-11-23T15:45:03,783 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T15:45:03,784 WARN [IPC Server handler 2 on default port 37303 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly. 2024-11-23T15:45:03,784 WARN [IPC Server handler 2 on default port 37303 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T15:45:03,784 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T15:45:03,784 WARN [IPC Server handler 2 on default port 37303 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T15:45:03,785 WARN [IPC Server handler 2 on default port 37303 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T15:45:03,789 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T15:45:03,789 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T15:45:03,790 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T15:45:03,790 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T15:45:03,792 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(635): Failed write hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321; retrying up to 10 times org.apache.hadoop.ipc.RemoteException: 
File /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation. at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hbase.util.FSTableDescriptors.writeTableDescriptor(FSTableDescriptors.java:631) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(FSTableDescriptors.java:159) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.master.procedure.InitMetaProcedure.writeFsLayout(InitMetaProcedure.java:87) ~[classes/:?] at org.apache.hadoop.hbase.master.procedure.InitMetaProcedure.executeFromState(InitMetaProcedure.java:103) ~[classes/:?] at org.apache.hadoop.hbase.master.procedure.InitMetaProcedure.executeFromState(InitMetaProcedure.java:55) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) ~[hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) ~[hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) ~[hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) ~[hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) ~[hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) ~[hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] 2024-11-23T15:45:03,793 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:03,799 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T15:45:03,800 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T15:45:03,801 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T15:45:03,803 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T15:45:03,804 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T15:45:03,810 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:03,810 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-23T15:45:03,819 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b712f9af2c12:0:becomeActiveMaster-HFileCleaner.large.0-1732376703806,5,FailOnTimeoutGroup] 2024-11-23T15:45:03,822 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b712f9af2c12:0:becomeActiveMaster-HFileCleaner.small.0-1732376703819,5,FailOnTimeoutGroup] 2024-11-23T15:45:03,822 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:03,823 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T15:45:03,824 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:03,825 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:03,832 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648790401_22 at /127.0.0.1:53482 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:46329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53482 dst: /127.0.0.1:46329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:03,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-23T15:45:03,847 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-23T15:45:03,851 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(591): Deleted hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T15:45:03,851 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/.tabledesc/.tableinfo.0000000002.1321 2024-11-23T15:45:03,852 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6 2024-11-23T15:45:03,866 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:03,866 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-23T15:45:03,874 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(2659): reportForDuty to master=b712f9af2c12,34855,1732376700708 with port=34219, startcode=1732376701684 2024-11-23T15:45:03,874 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(2659): reportForDuty to master=b712f9af2c12,34855,1732376700708 with port=36473, startcode=1732376701950 2024-11-23T15:45:03,876 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34855 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b712f9af2c12,36473,1732376701950 2024-11-23T15:45:03,880 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34855 {}] master.ServerManager(517): Registering regionserver=b712f9af2c12,36473,1732376701950 2024-11-23T15:45:03,885 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(2659): reportForDuty to master=b712f9af2c12,34855,1732376700708 with port=45337, startcode=1732376701834 2024-11-23T15:45:03,886 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648790401_22 at /127.0.0.1:53486 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:46329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53486 dst: /127.0.0.1:46329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:45:03,897 DEBUG [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6 2024-11-23T15:45:03,898 DEBUG [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37303 2024-11-23T15:45:03,898 DEBUG [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T15:45:03,902 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34855 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b712f9af2c12,34219,1732376701684 2024-11-23T15:45:03,902 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34855 {}] master.ServerManager(517): Registering regionserver=b712f9af2c12,34219,1732376701684 2024-11-23T15:45:03,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-23T15:45:03,907 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-23T15:45:03,908 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:45:03,909 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34855 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b712f9af2c12,45337,1732376701834 2024-11-23T15:45:03,909 DEBUG [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6 2024-11-23T15:45:03,909 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34855 {}] master.ServerManager(517): Registering regionserver=b712f9af2c12,45337,1732376701834 2024-11-23T15:45:03,909 DEBUG [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37303 2024-11-23T15:45:03,910 DEBUG [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T15:45:03,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T15:45:03,914 DEBUG [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6 2024-11-23T15:45:03,914 DEBUG [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37303 2024-11-23T15:45:03,915 DEBUG [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T15:45:03,915 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; 
tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T15:45:03,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:03,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:03,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T15:45:03,921 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T15:45:03,921 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:03,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:03,923 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T15:45:03,925 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T15:45:03,926 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:03,927 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:03,927 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T15:45:03,931 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T15:45:03,931 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:03,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T15:45:03,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:03,933 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T15:45:03,935 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740 2024-11-23T15:45:03,936 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740 2024-11-23T15:45:03,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T15:45:03,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T15:45:03,943 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-23T15:45:03,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T15:45:03,963 DEBUG [RS:2;b712f9af2c12:36473 {}] zookeeper.ZKUtil(111): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b712f9af2c12,36473,1732376701950 2024-11-23T15:45:03,963 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:45:03,963 WARN [RS:2;b712f9af2c12:36473 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T15:45:03,963 INFO [RS:2;b712f9af2c12:36473 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T15:45:03,963 DEBUG [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,36473,1732376701950 2024-11-23T15:45:03,964 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63708557, jitterRate=-0.05066852271556854}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T15:45:03,965 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b712f9af2c12,45337,1732376701834] 2024-11-23T15:45:03,965 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b712f9af2c12,34219,1732376701684] 2024-11-23T15:45:03,965 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b712f9af2c12,36473,1732376701950] 2024-11-23T15:45:03,965 DEBUG [RS:0;b712f9af2c12:34219 {}] zookeeper.ZKUtil(111): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b712f9af2c12,34219,1732376701684 2024-11-23T15:45:03,965 WARN [RS:0;b712f9af2c12:34219 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T15:45:03,966 INFO [RS:0;b712f9af2c12:34219 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T15:45:03,966 DEBUG [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,34219,1732376701684 2024-11-23T15:45:03,967 DEBUG [RS:1;b712f9af2c12:45337 {}] zookeeper.ZKUtil(111): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b712f9af2c12,45337,1732376701834 2024-11-23T15:45:03,967 WARN [RS:1;b712f9af2c12:45337 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T15:45:03,967 INFO [RS:1;b712f9af2c12:45337 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T15:45:03,967 DEBUG [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,45337,1732376701834 2024-11-23T15:45:03,970 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732376703908Initializing all the Stores at 1732376703911 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376703911Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376703911Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732376703911Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376703911Cleaning up temporary data from old regions at 1732376703942 (+31 ms)Region opened successfully at 1732376703969 (+27 ms) 2024-11-23T15:45:03,970 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T15:45:03,970 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T15:45:03,970 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T15:45:03,970 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T15:45:03,970 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T15:45:03,976 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T15:45:03,976 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732376703970Disabling compacts and flushes for region at 1732376703970Disabling writes for close at 1732376703970Writing region close event to WAL at 1732376703975 (+5 ms)Closed at 1732376703976 (+1 ms) 2024-11-23T15:45:03,981 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T15:45:03,981 INFO [PEWorker-1 {}] 
procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T15:45:03,990 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T15:45:04,000 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T15:45:04,004 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T15:45:04,005 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T15:45:04,007 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T15:45:04,008 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T15:45:04,028 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T15:45:04,028 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T15:45:04,030 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T15:45:04,036 INFO [RS:0;b712f9af2c12:34219 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T15:45:04,036 INFO [RS:2;b712f9af2c12:36473 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T15:45:04,036 INFO [RS:1;b712f9af2c12:45337 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T15:45:04,036 INFO [RS:0;b712f9af2c12:34219 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,036 INFO [RS:1;b712f9af2c12:45337 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,036 INFO [RS:2;b712f9af2c12:36473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-23T15:45:04,039 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T15:45:04,039 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T15:45:04,039 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T15:45:04,046 INFO [RS:2;b712f9af2c12:36473 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T15:45:04,046 INFO [RS:1;b712f9af2c12:45337 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T15:45:04,046 INFO [RS:0;b712f9af2c12:34219 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T15:45:04,048 INFO [RS:2;b712f9af2c12:36473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,048 INFO [RS:0;b712f9af2c12:34219 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,048 INFO [RS:1;b712f9af2c12:45337 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,048 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,048 DEBUG [RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,048 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,048 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,048 DEBUG [RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,048 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,048 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,048 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG 
[RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b712f9af2c12:0, corePoolSize=2, maxPoolSize=2 2024-11-23T15:45:04,049 DEBUG [RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b712f9af2c12:0, corePoolSize=2, maxPoolSize=2 2024-11-23T15:45:04,049 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b712f9af2c12:0, corePoolSize=2, maxPoolSize=2 2024-11-23T15:45:04,049 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,049 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,050 DEBUG [RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,050 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,050 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,050 DEBUG [RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,050 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,050 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,050 DEBUG [RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,050 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:04,050 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b712f9af2c12:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:45:04,050 DEBUG [RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b712f9af2c12:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:45:04,050 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b712f9af2c12:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:45:04,050 DEBUG [RS:1;b712f9af2c12:45337 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:45:04,050 DEBUG [RS:0;b712f9af2c12:34219 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:45:04,050 DEBUG [RS:2;b712f9af2c12:36473 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:45:04,053 INFO [RS:0;b712f9af2c12:34219 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,053 INFO [RS:2;b712f9af2c12:36473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,053 INFO [RS:2;b712f9af2c12:36473 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,053 INFO [RS:0;b712f9af2c12:34219 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-11-23T15:45:04,053 INFO [RS:2;b712f9af2c12:36473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,053 INFO [RS:2;b712f9af2c12:36473 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,053 INFO [RS:2;b712f9af2c12:36473 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,053 INFO [RS:0;b712f9af2c12:34219 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,053 INFO [RS:2;b712f9af2c12:36473 {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,36473,1732376701950-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T15:45:04,053 INFO [RS:0;b712f9af2c12:34219 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,054 INFO [RS:0;b712f9af2c12:34219 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,054 INFO [RS:0;b712f9af2c12:34219 {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,34219,1732376701684-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T15:45:04,056 INFO [RS:1;b712f9af2c12:45337 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,056 INFO [RS:1;b712f9af2c12:45337 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,056 INFO [RS:1;b712f9af2c12:45337 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,056 INFO [RS:1;b712f9af2c12:45337 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,056 INFO [RS:1;b712f9af2c12:45337 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,056 INFO [RS:1;b712f9af2c12:45337 {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,45337,1732376701834-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T15:45:04,078 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T15:45:04,078 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T15:45:04,079 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T15:45:04,081 INFO [RS:2;b712f9af2c12:36473 {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,36473,1732376701950-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,081 INFO [RS:1;b712f9af2c12:45337 {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,45337,1732376701834-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,081 INFO [RS:0;b712f9af2c12:34219 {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,34219,1732376701684-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-23T15:45:04,082 INFO [RS:0;b712f9af2c12:34219 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,082 INFO [RS:2;b712f9af2c12:36473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,082 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.Replication(171): b712f9af2c12,34219,1732376701684 started 2024-11-23T15:45:04,082 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.Replication(171): b712f9af2c12,36473,1732376701950 started 2024-11-23T15:45:04,082 INFO [RS:1;b712f9af2c12:45337 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,082 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.Replication(171): b712f9af2c12,45337,1732376701834 started 2024-11-23T15:45:04,106 INFO [RS:0;b712f9af2c12:34219 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,106 INFO [RS:1;b712f9af2c12:45337 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:04,107 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(1482): Serving as b712f9af2c12,45337,1732376701834, RpcServer on b712f9af2c12/172.17.0.2:45337, sessionid=0x10169a8366e0002 2024-11-23T15:45:04,107 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(1482): Serving as b712f9af2c12,34219,1732376701684, RpcServer on b712f9af2c12/172.17.0.2:34219, sessionid=0x10169a8366e0001 2024-11-23T15:45:04,108 INFO [RS:2;b712f9af2c12:36473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T15:45:04,108 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T15:45:04,108 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T15:45:04,108 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(1482): Serving as b712f9af2c12,36473,1732376701950, RpcServer on b712f9af2c12/172.17.0.2:36473, sessionid=0x10169a8366e0003 2024-11-23T15:45:04,108 DEBUG [RS:0;b712f9af2c12:34219 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b712f9af2c12,34219,1732376701684 2024-11-23T15:45:04,108 DEBUG [RS:1;b712f9af2c12:45337 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b712f9af2c12,45337,1732376701834 2024-11-23T15:45:04,108 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T15:45:04,108 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b712f9af2c12,45337,1732376701834' 2024-11-23T15:45:04,108 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b712f9af2c12,34219,1732376701684' 2024-11-23T15:45:04,108 DEBUG [RS:2;b712f9af2c12:36473 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b712f9af2c12,36473,1732376701950 2024-11-23T15:45:04,108 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b712f9af2c12,36473,1732376701950' 2024-11-23T15:45:04,108 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T15:45:04,108 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T15:45:04,108 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T15:45:04,109 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T15:45:04,109 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T15:45:04,109 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T15:45:04,110 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T15:45:04,110 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T15:45:04,110 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T15:45:04,110 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T15:45:04,111 DEBUG [RS:2;b712f9af2c12:36473 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b712f9af2c12,36473,1732376701950 2024-11-23T15:45:04,111 DEBUG [RS:0;b712f9af2c12:34219 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
b712f9af2c12,34219,1732376701684 2024-11-23T15:45:04,111 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b712f9af2c12,36473,1732376701950' 2024-11-23T15:45:04,111 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b712f9af2c12,34219,1732376701684' 2024-11-23T15:45:04,111 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T15:45:04,111 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T15:45:04,111 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T15:45:04,111 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T15:45:04,111 DEBUG [RS:1;b712f9af2c12:45337 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b712f9af2c12,45337,1732376701834 2024-11-23T15:45:04,111 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b712f9af2c12,45337,1732376701834' 2024-11-23T15:45:04,111 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T15:45:04,115 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T15:45:04,115 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T15:45:04,115 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T15:45:04,116 DEBUG [RS:2;b712f9af2c12:36473 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T15:45:04,116 INFO [RS:2;b712f9af2c12:36473 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T15:45:04,116 DEBUG [RS:0;b712f9af2c12:34219 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T15:45:04,116 INFO [RS:2;b712f9af2c12:36473 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T15:45:04,116 INFO [RS:0;b712f9af2c12:34219 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T15:45:04,117 INFO [RS:0;b712f9af2c12:34219 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T15:45:04,117 DEBUG [RS:1;b712f9af2c12:45337 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T15:45:04,117 INFO [RS:1;b712f9af2c12:45337 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T15:45:04,117 INFO [RS:1;b712f9af2c12:45337 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T15:45:04,159 WARN [b712f9af2c12:34855 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-23T15:45:04,221 INFO [RS:1;b712f9af2c12:45337 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T15:45:04,221 INFO [RS:2;b712f9af2c12:36473 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T15:45:04,222 INFO [RS:0;b712f9af2c12:34219 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T15:45:04,224 INFO [RS:1;b712f9af2c12:45337 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b712f9af2c12%2C45337%2C1732376701834, suffix=, logDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,45337,1732376701834, archiveDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/oldWALs, maxLogs=32 2024-11-23T15:45:04,224 INFO [RS:0;b712f9af2c12:34219 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b712f9af2c12%2C34219%2C1732376701684, suffix=, logDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,34219,1732376701684, archiveDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/oldWALs, maxLogs=32 2024-11-23T15:45:04,225 INFO [RS:2;b712f9af2c12:36473 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b712f9af2c12%2C36473%2C1732376701950, suffix=, logDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,36473,1732376701950, archiveDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/oldWALs, maxLogs=32 2024-11-23T15:45:04,249 DEBUG [RS:0;b712f9af2c12:34219 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,34219,1732376701684/b712f9af2c12%2C34219%2C1732376701684.1732376704232, exclude list is [], retry=0 2024-11-23T15:45:04,251 DEBUG [RS:2;b712f9af2c12:36473 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,36473,1732376701950/b712f9af2c12%2C36473%2C1732376701950.1732376704231, exclude list is [], retry=0 2024-11-23T15:45:04,251 DEBUG [RS:1;b712f9af2c12:45337 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,45337,1732376701834/b712f9af2c12%2C45337%2C1732376701834.1732376704231, exclude list is [], retry=0 2024-11-23T15:45:04,252 WARN [IPC Server handler 3 on default port 37303 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T15:45:04,252 WARN [IPC Server handler 3 on default port 37303 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 
2024-11-23T15:45:04,253 WARN [IPC Server handler 3 on default port 37303 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T15:45:04,257 WARN [IPC Server handler 4 on default port 37303 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T15:45:04,257 WARN [IPC Server handler 4 on default port 37303 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T15:45:04,257 WARN [IPC Server handler 4 on default port 37303 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T15:45:04,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46329,DS-574b886e-69b8-4f9e-89d1-eb2ad13216ee,DISK] 2024-11-23T15:45:04,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43305,DS-0903c522-593e-4a8d-bed1-b54c0bf2b73b,DISK] 2024-11-23T15:45:04,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43305,DS-0903c522-593e-4a8d-bed1-b54c0bf2b73b,DISK] 2024-11-23T15:45:04,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43305,DS-0903c522-593e-4a8d-bed1-b54c0bf2b73b,DISK] 2024-11-23T15:45:04,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:45071,DS-0d3025cd-3ee6-4429-861f-637602cae18e,DISK] 2024-11-23T15:45:04,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45071,DS-0d3025cd-3ee6-4429-861f-637602cae18e,DISK] 2024-11-23T15:45:04,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45071,DS-0d3025cd-3ee6-4429-861f-637602cae18e,DISK] 2024-11-23T15:45:04,298 INFO [RS:2;b712f9af2c12:36473 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,36473,1732376701950/b712f9af2c12%2C36473%2C1732376701950.1732376704231 2024-11-23T15:45:04,300 DEBUG [RS:2;b712f9af2c12:36473 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33133:33133),(127.0.0.1/127.0.0.1:42603:42603)] 2024-11-23T15:45:04,300 INFO [RS:0;b712f9af2c12:34219 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,34219,1732376701684/b712f9af2c12%2C34219%2C1732376701684.1732376704232 2024-11-23T15:45:04,302 DEBUG [RS:0;b712f9af2c12:34219 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33133:33133),(127.0.0.1/127.0.0.1:42603:42603)] 2024-11-23T15:45:04,302 INFO [RS:1;b712f9af2c12:45337 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,45337,1732376701834/b712f9af2c12%2C45337%2C1732376701834.1732376704231 2024-11-23T15:45:04,302 DEBUG [RS:1;b712f9af2c12:45337 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44331:44331),(127.0.0.1/127.0.0.1:42603:42603),(127.0.0.1/127.0.0.1:33133:33133)] 2024-11-23T15:45:04,412 DEBUG [b712f9af2c12:34855 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-23T15:45:04,421 DEBUG [b712f9af2c12:34855 {}] balancer.BalancerClusterState(204): Hosts are {b712f9af2c12=0} racks are {/default-rack=0} 2024-11-23T15:45:04,430 DEBUG [b712f9af2c12:34855 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T15:45:04,430 DEBUG [b712f9af2c12:34855 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T15:45:04,430 DEBUG [b712f9af2c12:34855 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T15:45:04,430 DEBUG [b712f9af2c12:34855 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T15:45:04,430 DEBUG [b712f9af2c12:34855 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T15:45:04,430 DEBUG [b712f9af2c12:34855 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T15:45:04,430 INFO [b712f9af2c12:34855 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T15:45:04,430 INFO [b712f9af2c12:34855 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T15:45:04,430 INFO [b712f9af2c12:34855 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T15:45:04,430 DEBUG [b712f9af2c12:34855 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 
2024-11-23T15:45:04,438 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b712f9af2c12,34219,1732376701684 2024-11-23T15:45:04,443 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b712f9af2c12,34219,1732376701684, state=OPENING 2024-11-23T15:45:04,501 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T15:45:04,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:04,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:04,509 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:04,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:04,510 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:04,510 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:04,510 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:04,511 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T15:45:04,513 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b712f9af2c12,34219,1732376701684}] 2024-11-23T15:45:04,515 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:04,688 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T15:45:04,692 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54887, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T15:45:04,705 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T15:45:04,706 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T15:45:04,706 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 
{event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-23T15:45:04,710 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b712f9af2c12%2C34219%2C1732376701684.meta, suffix=.meta, logDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,34219,1732376701684, archiveDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/oldWALs, maxLogs=32 2024-11-23T15:45:04,729 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,34219,1732376701684/b712f9af2c12%2C34219%2C1732376701684.meta.1732376704712.meta, exclude list is [], retry=0 2024-11-23T15:45:04,732 WARN [IPC Server handler 4 on default port 37303 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T15:45:04,732 WARN [IPC Server handler 4 on default port 37303 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T15:45:04,732 WARN [IPC Server handler 4 on default port 37303 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T15:45:04,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43305,DS-0903c522-593e-4a8d-bed1-b54c0bf2b73b,DISK] 2024-11-23T15:45:04,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45071,DS-0d3025cd-3ee6-4429-861f-637602cae18e,DISK] 2024-11-23T15:45:04,738 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/WALs/b712f9af2c12,34219,1732376701684/b712f9af2c12%2C34219%2C1732376701684.meta.1732376704712.meta 2024-11-23T15:45:04,739 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:42603:42603),(127.0.0.1/127.0.0.1:33133:33133)] 2024-11-23T15:45:04,739 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:45:04,741 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T15:45:04,744 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T15:45:04,751 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-23T15:45:04,756 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T15:45:04,757 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:45:04,757 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T15:45:04,757 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T15:45:04,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T15:45:04,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T15:45:04,762 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:04,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:04,764 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T15:45:04,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T15:45:04,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:04,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:04,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T15:45:04,769 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T15:45:04,770 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:04,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:04,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T15:45:04,773 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T15:45:04,773 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:04,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:04,774 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T15:45:04,777 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740 2024-11-23T15:45:04,782 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740 2024-11-23T15:45:04,785 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T15:45:04,785 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T15:45:04,786 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-23T15:45:04,789 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T15:45:04,791 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74001103, jitterRate=0.10270236432552338}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T15:45:04,791 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T15:45:04,792 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732376704758Writing region info on filesystem at 1732376704758Initializing all the Stores at 1732376704760 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376704760Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376704760Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732376704760Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376704760Cleaning up temporary data from old regions at 1732376704785 (+25 ms)Running coprocessor post-open hooks at 1732376704791 (+6 ms)Region opened successfully at 1732376704792 (+1 ms) 2024-11-23T15:45:04,800 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732376704679 2024-11-23T15:45:04,815 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T15:45:04,816 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T15:45:04,818 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=b712f9af2c12,34219,1732376701684 2024-11-23T15:45:04,822 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b712f9af2c12,34219,1732376701684, state=OPEN 2024-11-23T15:45:04,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T15:45:04,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T15:45:04,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T15:45:04,890 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T15:45:04,890 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:04,890 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:04,890 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:04,890 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:04,890 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b712f9af2c12,34219,1732376701684 2024-11-23T15:45:04,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T15:45:04,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b712f9af2c12,34219,1732376701684 in 377 msec 2024-11-23T15:45:04,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T15:45:04,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 910 msec 2024-11-23T15:45:04,909 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T15:45:04,909 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T15:45:04,927 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T15:45:04,928 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=b712f9af2c12,34219,1732376701684, seqNum=-1] 2024-11-23T15:45:04,951 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:45:04,954 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34745, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:45:05,001 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3620 sec 2024-11-23T15:45:05,001 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732376705001, completionTime=-1 2024-11-23T15:45:05,005 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-23T15:45:05,005 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T15:45:05,043 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-23T15:45:05,043 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732376765043 2024-11-23T15:45:05,043 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732376825043 2024-11-23T15:45:05,043 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 38 msec 2024-11-23T15:45:05,045 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-23T15:45:05,054 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,34855,1732376700708-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:05,055 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,34855,1732376700708-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:05,055 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,34855,1732376700708-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:05,056 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b712f9af2c12:34855, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:05,057 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:05,057 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-11-23T15:45:05,063 DEBUG [master/b712f9af2c12:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T15:45:05,089 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.002sec 2024-11-23T15:45:05,091 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T15:45:05,092 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T15:45:05,094 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T15:45:05,094 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T15:45:05,095 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T15:45:05,095 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,34855,1732376700708-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T15:45:05,096 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,34855,1732376700708-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T15:45:05,101 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T15:45:05,101 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T15:45:05,102 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,34855,1732376700708-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T15:45:05,123 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4df69356, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:45:05,128 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-23T15:45:05,128 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-23T15:45:05,134 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b712f9af2c12,34855,-1 for getting cluster id 2024-11-23T15:45:05,138 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T15:45:05,150 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a40f148a-4c7f-4ecd-a664-1fc76cb54180' 2024-11-23T15:45:05,153 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T15:45:05,153 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a40f148a-4c7f-4ecd-a664-1fc76cb54180" 2024-11-23T15:45:05,154 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e42cd32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:45:05,154 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b712f9af2c12,34855,-1] 2024-11-23T15:45:05,157 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T15:45:05,159 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:05,161 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42836, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T15:45:05,164 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@595d0081, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:45:05,165 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T15:45:05,176 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b712f9af2c12,34219,1732376701684, seqNum=-1] 2024-11-23T15:45:05,176 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:45:05,179 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58558, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:45:05,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=b712f9af2c12,34855,1732376700708 2024-11-23T15:45:05,205 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T15:45:05,210 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is b712f9af2c12,34855,1732376700708 2024-11-23T15:45:05,212 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6dfccafb 2024-11-23T15:45:05,213 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T15:45:05,217 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42848, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T15:45:05,224 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T15:45:05,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-23T15:45:05,244 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T15:45:05,248 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:05,251 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-23T15:45:05,252 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T15:45:05,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T15:45:05,269 WARN [IPC Server handler 1 on default port 37303 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly. 
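The create request logged above (Client=jenkins creating 'TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single 'cf' family, VERSIONS => '1') is an ordinary Admin createTable call; the CreateTableProcedure and the placement warnings that follow are the server-side consequences of it. A minimal client-side sketch, assuming the standard HBase TableDescriptorBuilder/Admin API and a reachable cluster configuration; this is illustrative and not taken from the test source:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // One column family 'cf', single version, matching the descriptor in the log entry above.
          TableDescriptorBuilder td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build());
          // Drives the CreateTableProcedure (pid=4) recorded in the surrounding log entries.
          admin.createTable(td.build());
        }
      }
    }
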
2024-11-23T15:45:05,269 WARN [IPC Server handler 1 on default port 37303 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T15:45:05,269 WARN [IPC Server handler 1 on default port 37303 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T15:45:05,269 WARN [IPC Server handler 1 on default port 37303 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T15:45:05,271 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(635): Failed write hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/default/TestHBaseWalOnEC/.tabledesc/.tableinfo.0000000001.392; retrying up to 10 times org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/default/TestHBaseWalOnEC/.tabledesc/.tableinfo.0000000001.392 could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hbase.util.FSTableDescriptors.writeTableDescriptor(FSTableDescriptors.java:631) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FSTableDescriptors.createTableDescriptorForTableDirectory(FSTableDescriptors.java:707) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FSTableDescriptors.createTableDescriptorForTableDirectory(FSTableDescriptors.java:680) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.master.procedure.CreateTableProcedure.createFsLayout(CreateTableProcedure.java:376) ~[classes/:?] at org.apache.hadoop.hbase.master.procedure.CreateTableProcedure.createFsLayout(CreateTableProcedure.java:353) ~[classes/:?] at org.apache.hadoop.hbase.master.procedure.CreateTableProcedure.executeFromState(CreateTableProcedure.java:107) ~[classes/:?] at org.apache.hadoop.hbase.master.procedure.CreateTableProcedure.executeFromState(CreateTableProcedure.java:59) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) ~[hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) ~[hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) ~[hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) ~[hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) ~[hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) ~[hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] 2024-11-23T15:45:05,285 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:05,286 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:05,301 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648790401_22 at /127.0.0.1:53514 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:46329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53514 dst: /127.0.0.1:46329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:05,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-23T15:45:05,312 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-23T15:45:05,314 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(591): Deleted hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/default/TestHBaseWalOnEC/.tabledesc/.tableinfo.0000000001.392 2024-11-23T15:45:05,323 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0879d9354bcb63ac1a30dc0c6cc2d387, NAME => 'TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6 2024-11-23T15:45:05,336 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:05,336 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:05,339 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648790401_22 at /127.0.0.1:53536 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:46329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53536 dst: /127.0.0.1:46329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:05,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-23T15:45:05,348 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-23T15:45:05,348 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:45:05,348 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 0879d9354bcb63ac1a30dc0c6cc2d387, disabling compactions & flushes 2024-11-23T15:45:05,349 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 2024-11-23T15:45:05,349 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 2024-11-23T15:45:05,349 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. after waiting 0 ms 2024-11-23T15:45:05,349 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 2024-11-23T15:45:05,349 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 
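The repeated "Cannot allocate parity block" warnings and the failed .tableinfo write above come from the RS-3-2-1024k erasure coding policy named in the messages: a striped block group under that policy needs 3 data plus 2 parity targets, i.e. 5 datanodes, while the log reports only 3 datanodes running, so the two parity cells (index=3 and index=4) cannot be placed. A minimal sketch of how one might inspect the effective policy on the test data directory and compare it against the live datanode count, assuming a plain Hadoop 3.x DistributedFileSystem client (the namenode URI and path are copied from the log; the class and variable names are illustrative):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37303"), conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          Path dir = new Path("/user/jenkins/test-data");  // parent of the test data directory in the log
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          DatanodeInfo[] live = dfs.getDataNodeStats();
          if (policy != null) {
            // 3 data + 2 parity = 5 for RS-3-2-1024k
            int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
            System.out.printf("policy=%s needs %d datanodes, cluster has %d live%n",
                policy.getName(), needed, live.length);
          } else {
            System.out.println("no EC policy set on " + dir);
          }
        }
      }
    }

This is the same mismatch that the 'hdfs ec -verifyClusterSetup' command suggested in the warnings would report from the command line.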
2024-11-23T15:45:05,349 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0879d9354bcb63ac1a30dc0c6cc2d387: Waiting for close lock at 1732376705348Disabling compacts and flushes for region at 1732376705348Disabling writes for close at 1732376705349 (+1 ms)Writing region close event to WAL at 1732376705349Closed at 1732376705349 2024-11-23T15:45:05,351 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T15:45:05,357 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1732376705351"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732376705351"}]},"ts":"1732376705351"} 2024-11-23T15:45:05,361 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-23T15:45:05,363 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T15:45:05,366 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732376705363"}]},"ts":"1732376705363"} 2024-11-23T15:45:05,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T15:45:05,370 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-23T15:45:05,371 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b712f9af2c12=0} racks are {/default-rack=0} 2024-11-23T15:45:05,372 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T15:45:05,372 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T15:45:05,372 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T15:45:05,372 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T15:45:05,372 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T15:45:05,372 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T15:45:05,372 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T15:45:05,373 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T15:45:05,373 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T15:45:05,373 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T15:45:05,375 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0879d9354bcb63ac1a30dc0c6cc2d387, ASSIGN}] 2024-11-23T15:45:05,379 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0879d9354bcb63ac1a30dc0c6cc2d387, ASSIGN 2024-11-23T15:45:05,384 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0879d9354bcb63ac1a30dc0c6cc2d387, ASSIGN; state=OFFLINE, location=b712f9af2c12,34219,1732376701684; forceNewPlan=false, retain=false 2024-11-23T15:45:05,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45071 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-23T15:45:05,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45071 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-23T15:45:05,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43305 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-23T15:45:05,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43305 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-23T15:45:05,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45071 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-23T15:45:05,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43305 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-23T15:45:05,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45071 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-23T15:45:05,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43305 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-23T15:45:05,536 INFO [b712f9af2c12:34855 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-23T15:45:05,537 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0879d9354bcb63ac1a30dc0c6cc2d387, regionState=OPENING, regionLocation=b712f9af2c12,34219,1732376701684 2024-11-23T15:45:05,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0879d9354bcb63ac1a30dc0c6cc2d387, ASSIGN because future has completed 2024-11-23T15:45:05,544 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0879d9354bcb63ac1a30dc0c6cc2d387, server=b712f9af2c12,34219,1732376701684}] 2024-11-23T15:45:05,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T15:45:05,705 INFO [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 
2024-11-23T15:45:05,706 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0879d9354bcb63ac1a30dc0c6cc2d387, NAME => 'TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:45:05,706 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 0879d9354bcb63ac1a30dc0c6cc2d387 2024-11-23T15:45:05,706 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:45:05,707 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0879d9354bcb63ac1a30dc0c6cc2d387 2024-11-23T15:45:05,707 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0879d9354bcb63ac1a30dc0c6cc2d387 2024-11-23T15:45:05,709 INFO [StoreOpener-0879d9354bcb63ac1a30dc0c6cc2d387-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0879d9354bcb63ac1a30dc0c6cc2d387 2024-11-23T15:45:05,713 INFO [StoreOpener-0879d9354bcb63ac1a30dc0c6cc2d387-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0879d9354bcb63ac1a30dc0c6cc2d387 columnFamilyName cf 2024-11-23T15:45:05,713 DEBUG [StoreOpener-0879d9354bcb63ac1a30dc0c6cc2d387-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:05,715 INFO [StoreOpener-0879d9354bcb63ac1a30dc0c6cc2d387-1 {}] regionserver.HStore(327): Store=0879d9354bcb63ac1a30dc0c6cc2d387/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:45:05,715 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0879d9354bcb63ac1a30dc0c6cc2d387 2024-11-23T15:45:05,718 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/default/TestHBaseWalOnEC/0879d9354bcb63ac1a30dc0c6cc2d387 2024-11-23T15:45:05,719 DEBUG 
[RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/default/TestHBaseWalOnEC/0879d9354bcb63ac1a30dc0c6cc2d387 2024-11-23T15:45:05,720 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0879d9354bcb63ac1a30dc0c6cc2d387 2024-11-23T15:45:05,720 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0879d9354bcb63ac1a30dc0c6cc2d387 2024-11-23T15:45:05,723 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0879d9354bcb63ac1a30dc0c6cc2d387 2024-11-23T15:45:05,732 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/default/TestHBaseWalOnEC/0879d9354bcb63ac1a30dc0c6cc2d387/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:45:05,734 INFO [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0879d9354bcb63ac1a30dc0c6cc2d387; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74214368, jitterRate=0.1058802604675293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T15:45:05,735 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0879d9354bcb63ac1a30dc0c6cc2d387 2024-11-23T15:45:05,735 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0879d9354bcb63ac1a30dc0c6cc2d387: Running coprocessor pre-open hook at 1732376705707Writing region info on filesystem at 1732376705707Initializing all the Stores at 1732376705709 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732376705709Cleaning up temporary data from old regions at 1732376705720 (+11 ms)Running coprocessor post-open hooks at 1732376705735 (+15 ms)Region opened successfully at 1732376705735 2024-11-23T15:45:05,737 INFO [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387., pid=6, masterSystemTime=1732376705699 2024-11-23T15:45:05,742 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 2024-11-23T15:45:05,742 INFO [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 
2024-11-23T15:45:05,744 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0879d9354bcb63ac1a30dc0c6cc2d387, regionState=OPEN, openSeqNum=2, regionLocation=b712f9af2c12,34219,1732376701684 2024-11-23T15:45:05,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0879d9354bcb63ac1a30dc0c6cc2d387, server=b712f9af2c12,34219,1732376701684 because future has completed 2024-11-23T15:45:05,763 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T15:45:05,764 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0879d9354bcb63ac1a30dc0c6cc2d387, server=b712f9af2c12,34219,1732376701684 in 208 msec 2024-11-23T15:45:05,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T15:45:05,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=0879d9354bcb63ac1a30dc0c6cc2d387, ASSIGN in 390 msec 2024-11-23T15:45:05,782 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T15:45:05,783 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732376705782"}]},"ts":"1732376705782"} 2024-11-23T15:45:05,794 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-23T15:45:05,797 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T15:45:05,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 570 msec 2024-11-23T15:45:05,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T15:45:05,891 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-23T15:45:05,891 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-23T15:45:05,892 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T15:45:05,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-23T15:45:05,899 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T15:45:05,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
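The entries that follow record a single Put of row/cf:cq into TestHBaseWalOnEC (visible later as the key of the flushed HFile) and an explicit flush of the table via FlushTableProcedure pid=7. On the client side that pair of operations is roughly the following; a sketch against the standard Table/Admin API, not the actual test code, and the cell value is illustrative since the log only reports a 32 B data size:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          // Single cell row/cf:cq, matching the key seen in the flushed HFile below.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Forces the small memstore to be written out as an HFile (FlushTableProcedure pid=7).
          admin.flush(name);
        }
      }
    }
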
2024-11-23T15:45:05,911 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387., hostname=b712f9af2c12,34219,1732376701684, seqNum=2] 2024-11-23T15:45:05,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-23T15:45:05,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-23T15:45:05,936 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:45:05,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T15:45:05,940 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:45:05,942 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:45:06,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T15:45:06,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34219 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-23T15:45:06,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 2024-11-23T15:45:06,122 INFO [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 0879d9354bcb63ac1a30dc0c6cc2d387 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-23T15:45:06,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/default/TestHBaseWalOnEC/0879d9354bcb63ac1a30dc0c6cc2d387/.tmp/cf/9abfa68a31a1408fafed9b66f664d915 is 36, key is row/cf:cq/1732376705914/Put/seqid=0 2024-11-23T15:45:06,201 WARN [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-23T15:45:06,201 WARN [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:06,216 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1560226346_22 at /127.0.0.1:53568 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:46329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53568 dst: /127.0.0.1:46329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:06,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-23T15:45:06,223 WARN [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-23T15:45:06,223 INFO [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/default/TestHBaseWalOnEC/0879d9354bcb63ac1a30dc0c6cc2d387/.tmp/cf/9abfa68a31a1408fafed9b66f664d915 2024-11-23T15:45:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T15:45:06,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/default/TestHBaseWalOnEC/0879d9354bcb63ac1a30dc0c6cc2d387/.tmp/cf/9abfa68a31a1408fafed9b66f664d915 as hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/default/TestHBaseWalOnEC/0879d9354bcb63ac1a30dc0c6cc2d387/cf/9abfa68a31a1408fafed9b66f664d915 2024-11-23T15:45:06,281 INFO [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/default/TestHBaseWalOnEC/0879d9354bcb63ac1a30dc0c6cc2d387/cf/9abfa68a31a1408fafed9b66f664d915, entries=1, sequenceid=5, filesize=4.7 K 2024-11-23T15:45:06,289 INFO [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 0879d9354bcb63ac1a30dc0c6cc2d387 in 166ms, sequenceid=5, compaction requested=false 2024-11-23T15:45:06,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-23T15:45:06,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 0879d9354bcb63ac1a30dc0c6cc2d387: 2024-11-23T15:45:06,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 
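After the flush above, the HFile 9abfa68a31a1408fafed9b66f664d915 is committed from the region's .tmp directory into the cf/ store directory (entries=1, sequenceid=5, filesize=4.7 K). If one wanted to confirm the committed file from a client, a plain FileSystem listing of the store directory would do; the namenode URI and path below are copied from the log entries, and this is only an illustrative check, not part of the test:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Store directory of region 0879d9354bcb63ac1a30dc0c6cc2d387, family 'cf', from the log above.
        Path storeDir = new Path("/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/"
            + "data/default/TestHBaseWalOnEC/0879d9354bcb63ac1a30dc0c6cc2d387/cf");
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37303"), conf)) {
          for (FileStatus f : fs.listStatus(storeDir)) {
            System.out.println(f.getPath().getName() + " " + f.getLen() + " bytes");
          }
        }
      }
    }
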
2024-11-23T15:45:06,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-23T15:45:06,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-23T15:45:06,304 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-23T15:45:06,304 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 357 msec 2024-11-23T15:45:06,309 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 377 msec 2024-11-23T15:45:06,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T15:45:06,571 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-23T15:45:06,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T15:45:06,585 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T15:45:06,585 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:45:06,590 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:06,590 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:06,590 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T15:45:06,591 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T15:45:06,591 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1496649153, stopped=false 2024-11-23T15:45:06,591 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b712f9af2c12,34855,1732376700708 2024-11-23T15:45:06,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:06,642 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:06,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:06,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:06,642 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:06,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, 
quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:06,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:06,643 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T15:45:06,643 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:06,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:06,643 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T15:45:06,643 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:45:06,643 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:06,643 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:06,644 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b712f9af2c12,34219,1732376701684' ***** 2024-11-23T15:45:06,644 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T15:45:06,644 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b712f9af2c12,45337,1732376701834' ***** 2024-11-23T15:45:06,644 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T15:45:06,644 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b712f9af2c12,36473,1732376701950' ***** 2024-11-23T15:45:06,644 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T15:45:06,644 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T15:45:06,644 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T15:45:06,645 INFO [RS:1;b712f9af2c12:45337 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T15:45:06,645 INFO [RS:2;b712f9af2c12:36473 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T15:45:06,645 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T15:45:06,645 INFO [RS:1;b712f9af2c12:45337 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T15:45:06,645 INFO [RS:2;b712f9af2c12:36473 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-23T15:45:06,645 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(959): stopping server b712f9af2c12,36473,1732376701950 2024-11-23T15:45:06,645 INFO [RS:2;b712f9af2c12:36473 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T15:45:06,645 INFO [RS:2;b712f9af2c12:36473 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;b712f9af2c12:36473. 2024-11-23T15:45:06,645 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:06,645 DEBUG [RS:2;b712f9af2c12:36473 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:45:06,645 DEBUG [RS:2;b712f9af2c12:36473 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:06,646 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(976): stopping server b712f9af2c12,36473,1732376701950; all regions closed. 2024-11-23T15:45:06,646 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T15:45:06,646 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(959): stopping server b712f9af2c12,45337,1732376701834 2024-11-23T15:45:06,646 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:06,646 INFO [RS:1;b712f9af2c12:45337 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T15:45:06,646 INFO [RS:1;b712f9af2c12:45337 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;b712f9af2c12:45337. 
2024-11-23T15:45:06,646 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(878): Closing user regions 2024-11-23T15:45:06,646 DEBUG [RS:1;b712f9af2c12:45337 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:45:06,647 DEBUG [RS:1;b712f9af2c12:45337 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:06,647 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(976): stopping server b712f9af2c12,45337,1732376701834; all regions closed. 2024-11-23T15:45:06,647 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(3091): Received CLOSE for 0879d9354bcb63ac1a30dc0c6cc2d387 2024-11-23T15:45:06,648 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0879d9354bcb63ac1a30dc0c6cc2d387, disabling compactions & flushes 2024-11-23T15:45:06,648 INFO [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 2024-11-23T15:45:06,648 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 2024-11-23T15:45:06,648 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. after waiting 0 ms 2024-11-23T15:45:06,648 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 
2024-11-23T15:45:06,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43305 is added to blk_1073741827_1017 (size=93) 2024-11-23T15:45:06,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45071 is added to blk_1073741827_1017 (size=93) 2024-11-23T15:45:06,653 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T15:45:06,653 INFO [RS:0;b712f9af2c12:34219 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T15:45:06,653 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T15:45:06,653 INFO [RS:0;b712f9af2c12:34219 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T15:45:06,653 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(959): stopping server b712f9af2c12,34219,1732376701684 2024-11-23T15:45:06,653 INFO [RS:0;b712f9af2c12:34219 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T15:45:06,653 INFO [RS:0;b712f9af2c12:34219 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b712f9af2c12:34219. 2024-11-23T15:45:06,653 DEBUG [RS:0;b712f9af2c12:34219 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:45:06,653 DEBUG [RS:0;b712f9af2c12:34219 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:06,654 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T15:45:06,654 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T15:45:06,654 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-23T15:45:06,654 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T15:45:06,658 INFO [regionserver/b712f9af2c12:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T15:45:06,662 DEBUG [RS:2;b712f9af2c12:36473 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/oldWALs 2024-11-23T15:45:06,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741828_1018 (size=93) 2024-11-23T15:45:06,662 INFO [RS:2;b712f9af2c12:36473 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b712f9af2c12%2C36473%2C1732376701950:(num 1732376704231) 2024-11-23T15:45:06,662 DEBUG [RS:2;b712f9af2c12:36473 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:06,662 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T15:45:06,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43305 is added to blk_1073741828_1018 (size=93) 2024-11-23T15:45:06,663 INFO [RS:2;b712f9af2c12:36473 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T15:45:06,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45071 is added to blk_1073741828_1018 (size=93) 2024-11-23T15:45:06,663 INFO [RS:2;b712f9af2c12:36473 {}] hbase.ChoreService(370): Chore service for: regionserver/b712f9af2c12:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-23T15:45:06,663 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T15:45:06,663 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T15:45:06,663 INFO [regionserver/b712f9af2c12:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T15:45:06,663 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-23T15:45:06,663 INFO [RS:2;b712f9af2c12:36473 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T15:45:06,664 INFO [RS:2;b712f9af2c12:36473 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36473 2024-11-23T15:45:06,668 DEBUG [RS:1;b712f9af2c12:45337 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/oldWALs 2024-11-23T15:45:06,668 INFO [RS:1;b712f9af2c12:45337 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b712f9af2c12%2C45337%2C1732376701834:(num 1732376704231) 2024-11-23T15:45:06,668 DEBUG [RS:1;b712f9af2c12:45337 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:06,668 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T15:45:06,668 INFO [RS:1;b712f9af2c12:45337 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T15:45:06,668 INFO [RS:1;b712f9af2c12:45337 {}] hbase.ChoreService(370): Chore service for: regionserver/b712f9af2c12:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T15:45:06,668 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T15:45:06,668 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T15:45:06,668 INFO [regionserver/b712f9af2c12:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T15:45:06,668 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-23T15:45:06,668 INFO [RS:1;b712f9af2c12:45337 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T15:45:06,668 INFO [RS:1;b712f9af2c12:45337 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45337 2024-11-23T15:45:06,671 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-23T15:45:06,671 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T15:45:06,671 DEBUG [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(1325): Online Regions={0879d9354bcb63ac1a30dc0c6cc2d387=TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387., 1588230740=hbase:meta,,1.1588230740} 2024-11-23T15:45:06,671 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T15:45:06,671 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T15:45:06,671 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T15:45:06,671 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T15:45:06,671 DEBUG [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(1351): Waiting on 0879d9354bcb63ac1a30dc0c6cc2d387, 1588230740 2024-11-23T15:45:06,671 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-23T15:45:06,675 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b712f9af2c12,36473,1732376701950 2024-11-23T15:45:06,675 INFO [RS:2;b712f9af2c12:36473 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T15:45:06,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T15:45:06,684 INFO [RS:1;b712f9af2c12:45337 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T15:45:06,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b712f9af2c12,45337,1732376701834 2024-11-23T15:45:06,684 INFO [regionserver/b712f9af2c12:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T15:45:06,684 INFO [regionserver/b712f9af2c12:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T15:45:06,684 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b712f9af2c12,45337,1732376701834] 2024-11-23T15:45:06,684 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. 
java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$366/0x00007f5cf88f5288@3724a7d3 rejected from java.util.concurrent.ThreadPoolExecutor@72262f53[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-23T15:45:06,700 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b712f9af2c12,45337,1732376701834 already deleted, retry=false 2024-11-23T15:45:06,700 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b712f9af2c12,45337,1732376701834 expired; onlineServers=2 2024-11-23T15:45:06,700 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b712f9af2c12,36473,1732376701950] 2024-11-23T15:45:06,709 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b712f9af2c12,36473,1732376701950 already deleted, retry=false 2024-11-23T15:45:06,709 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b712f9af2c12,36473,1732376701950 expired; onlineServers=1 2024-11-23T15:45:06,710 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/default/TestHBaseWalOnEC/0879d9354bcb63ac1a30dc0c6cc2d387/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T15:45:06,717 INFO [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 2024-11-23T15:45:06,717 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0879d9354bcb63ac1a30dc0c6cc2d387: Waiting for close lock at 1732376706648Running coprocessor pre-close hooks at 1732376706648Disabling compacts and flushes for region at 1732376706648Disabling writes for close at 1732376706648Writing region close event to WAL at 1732376706657 (+9 ms)Running coprocessor post-close hooks at 1732376706715 (+58 ms)Closed at 1732376706717 (+2 ms) 2024-11-23T15:45:06,718 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387. 
2024-11-23T15:45:06,722 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/.tmp/info/4b72749bef93466f88a595ce24b3caeb is 153, key is TestHBaseWalOnEC,,1732376705219.0879d9354bcb63ac1a30dc0c6cc2d387./info:regioninfo/1732376705744/Put/seqid=0 2024-11-23T15:45:06,725 WARN [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:06,725 WARN [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:06,730 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1560226346_22 at /127.0.0.1:40492 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:43305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40492 dst: /127.0.0.1:43305 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:06,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43305 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-23T15:45:06,736 WARN [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-23T15:45:06,736 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/.tmp/info/4b72749bef93466f88a595ce24b3caeb 2024-11-23T15:45:06,772 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/.tmp/ns/afdc3f90e66e4c1390499e0956ac0980 is 43, key is default/ns:d/1732376704958/Put/seqid=0 2024-11-23T15:45:06,775 WARN [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:06,775 WARN [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:06,780 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1560226346_22 at /127.0.0.1:40522 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:43305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40522 dst: /127.0.0.1:43305 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:06,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43305 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-23T15:45:06,788 WARN [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-23T15:45:06,788 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/.tmp/ns/afdc3f90e66e4c1390499e0956ac0980 2024-11-23T15:45:06,792 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:45:06,792 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36473-0x10169a8366e0003, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:45:06,793 INFO [RS:2;b712f9af2c12:36473 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T15:45:06,793 INFO [RS:2;b712f9af2c12:36473 {}] regionserver.HRegionServer(1031): Exiting; stopping=b712f9af2c12,36473,1732376701950; zookeeper connection closed. 2024-11-23T15:45:06,793 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2f8662b4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2f8662b4 2024-11-23T15:45:06,800 INFO [RS:1;b712f9af2c12:45337 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T15:45:06,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:45:06,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45337-0x10169a8366e0002, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:45:06,800 INFO [RS:1;b712f9af2c12:45337 {}] regionserver.HRegionServer(1031): Exiting; stopping=b712f9af2c12,45337,1732376701834; zookeeper connection closed. 2024-11-23T15:45:06,801 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@37427f30 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@37427f30 2024-11-23T15:45:06,815 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/.tmp/table/85c6d694468847798e25b5f77ba07ba1 is 52, key is TestHBaseWalOnEC/table:state/1732376705782/Put/seqid=0 2024-11-23T15:45:06,818 WARN [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:06,818 WARN [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-23T15:45:06,831 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1560226346_22 at /127.0.0.1:40534 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:43305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40534 dst: /127.0.0.1:43305 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:06,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43305 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-23T15:45:06,835 WARN [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-23T15:45:06,836 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/.tmp/table/85c6d694468847798e25b5f77ba07ba1 2024-11-23T15:45:06,849 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/.tmp/info/4b72749bef93466f88a595ce24b3caeb as hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/info/4b72749bef93466f88a595ce24b3caeb 2024-11-23T15:45:06,860 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/info/4b72749bef93466f88a595ce24b3caeb, entries=10, sequenceid=11, filesize=6.5 K 2024-11-23T15:45:06,862 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/.tmp/ns/afdc3f90e66e4c1390499e0956ac0980 as hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/ns/afdc3f90e66e4c1390499e0956ac0980 2024-11-23T15:45:06,872 DEBUG [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-23T15:45:06,872 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/ns/afdc3f90e66e4c1390499e0956ac0980, entries=2, sequenceid=11, filesize=5.0 K 2024-11-23T15:45:06,874 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/.tmp/table/85c6d694468847798e25b5f77ba07ba1 as hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/table/85c6d694468847798e25b5f77ba07ba1 2024-11-23T15:45:06,883 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/table/85c6d694468847798e25b5f77ba07ba1, entries=2, sequenceid=11, filesize=5.1 K 2024-11-23T15:45:06,885 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 213ms, sequenceid=11, compaction requested=false 2024-11-23T15:45:06,885 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T15:45:06,908 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-23T15:45:06,909 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T15:45:06,909 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T15:45:06,910 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732376706671Running coprocessor pre-close hooks at 1732376706671Disabling compacts and flushes for region at 1732376706671Disabling writes for close at 1732376706671Obtaining lock to block concurrent updates at 1732376706671Preparing flush snapshotting stores in 1588230740 at 1732376706671Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1732376706672 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732376706673 (+1 ms)Flushing 1588230740/info: creating writer at 1732376706673Flushing 1588230740/info: appending metadata at 1732376706717 (+44 ms)Flushing 1588230740/info: closing flushed file at 1732376706717Flushing 1588230740/ns: creating writer at 1732376706750 (+33 ms)Flushing 1588230740/ns: appending metadata at 1732376706770 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1732376706770Flushing 1588230740/table: creating writer at 1732376706797 (+27 ms)Flushing 1588230740/table: appending metadata at 1732376706814 (+17 ms)Flushing 1588230740/table: closing flushed file at 1732376706814Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73a9b137: reopening flushed file at 1732376706847 (+33 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a746899: reopening flushed file at 1732376706861 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2faa435c: reopening flushed file at 1732376706872 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 213ms, sequenceid=11, compaction requested=false at 1732376706885 (+13 ms)Writing region close event to WAL at 1732376706894 (+9 ms)Running coprocessor post-close hooks at 1732376706909 (+15 ms)Closed at 1732376706909 2024-11-23T15:45:06,910 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T15:45:07,056 INFO [regionserver/b712f9af2c12:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T15:45:07,056 INFO [regionserver/b712f9af2c12:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T15:45:07,072 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(976): stopping server b712f9af2c12,34219,1732376701684; all regions closed. 
2024-11-23T15:45:07,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43305 is added to blk_1073741829_1019 (size=2751) 2024-11-23T15:45:07,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45071 is added to blk_1073741829_1019 (size=2751) 2024-11-23T15:45:07,079 DEBUG [RS:0;b712f9af2c12:34219 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/oldWALs 2024-11-23T15:45:07,079 INFO [RS:0;b712f9af2c12:34219 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b712f9af2c12%2C34219%2C1732376701684.meta:.meta(num 1732376704712) 2024-11-23T15:45:07,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43305 is added to blk_1073741826_1016 (size=1298) 2024-11-23T15:45:07,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45071 is added to blk_1073741826_1016 (size=1298) 2024-11-23T15:45:07,085 DEBUG [RS:0;b712f9af2c12:34219 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/oldWALs 2024-11-23T15:45:07,085 INFO [RS:0;b712f9af2c12:34219 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b712f9af2c12%2C34219%2C1732376701684:(num 1732376704232) 2024-11-23T15:45:07,085 DEBUG [RS:0;b712f9af2c12:34219 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:07,086 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T15:45:07,086 INFO [RS:0;b712f9af2c12:34219 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T15:45:07,087 INFO [RS:0;b712f9af2c12:34219 {}] hbase.ChoreService(370): Chore service for: regionserver/b712f9af2c12:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T15:45:07,087 INFO [RS:0;b712f9af2c12:34219 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T15:45:07,087 INFO [regionserver/b712f9af2c12:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T15:45:07,087 INFO [RS:0;b712f9af2c12:34219 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34219 2024-11-23T15:45:07,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T15:45:07,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b712f9af2c12,34219,1732376701684 2024-11-23T15:45:07,159 INFO [RS:0;b712f9af2c12:34219 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T15:45:07,159 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b712f9af2c12,34219,1732376701684] 2024-11-23T15:45:07,175 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b712f9af2c12,34219,1732376701684 already deleted, retry=false 2024-11-23T15:45:07,175 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b712f9af2c12,34219,1732376701684 expired; onlineServers=0 2024-11-23T15:45:07,175 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b712f9af2c12,34855,1732376700708' ***** 2024-11-23T15:45:07,176 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T15:45:07,176 INFO [M:0;b712f9af2c12:34855 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T15:45:07,176 INFO [M:0;b712f9af2c12:34855 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T15:45:07,176 DEBUG [M:0;b712f9af2c12:34855 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T15:45:07,176 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-23T15:45:07,176 DEBUG [M:0;b712f9af2c12:34855 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T15:45:07,176 DEBUG [master/b712f9af2c12:0:becomeActiveMaster-HFileCleaner.large.0-1732376703806 {}] cleaner.HFileCleaner(306): Exit Thread[master/b712f9af2c12:0:becomeActiveMaster-HFileCleaner.large.0-1732376703806,5,FailOnTimeoutGroup] 2024-11-23T15:45:07,176 INFO [M:0;b712f9af2c12:34855 {}] hbase.ChoreService(370): Chore service for: master/b712f9af2c12:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T15:45:07,176 INFO [M:0;b712f9af2c12:34855 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T15:45:07,177 DEBUG [master/b712f9af2c12:0:becomeActiveMaster-HFileCleaner.small.0-1732376703819 {}] cleaner.HFileCleaner(306): Exit Thread[master/b712f9af2c12:0:becomeActiveMaster-HFileCleaner.small.0-1732376703819,5,FailOnTimeoutGroup] 2024-11-23T15:45:07,177 DEBUG [M:0;b712f9af2c12:34855 {}] master.HMaster(1795): Stopping service threads 2024-11-23T15:45:07,177 INFO [M:0;b712f9af2c12:34855 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T15:45:07,177 INFO [M:0;b712f9af2c12:34855 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T15:45:07,178 INFO [M:0;b712f9af2c12:34855 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T15:45:07,178 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T15:45:07,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T15:45:07,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:07,184 DEBUG [M:0;b712f9af2c12:34855 {}] zookeeper.ZKUtil(347): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T15:45:07,184 WARN [M:0;b712f9af2c12:34855 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T15:45:07,185 INFO [M:0;b712f9af2c12:34855 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/.lastflushedseqids 2024-11-23T15:45:07,193 WARN [M:0;b712f9af2c12:34855 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:07,193 WARN [M:0;b712f9af2c12:34855 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-23T15:45:07,195 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648790401_22 at /127.0.0.1:39716 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:45071:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39716 dst: /127.0.0.1:45071 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:07,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45071 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-23T15:45:07,200 WARN [M:0;b712f9af2c12:34855 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-23T15:45:07,200 INFO [M:0;b712f9af2c12:34855 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T15:45:07,200 INFO [M:0;b712f9af2c12:34855 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T15:45:07,200 DEBUG [M:0;b712f9af2c12:34855 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T15:45:07,200 INFO [M:0;b712f9af2c12:34855 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:45:07,200 DEBUG [M:0;b712f9af2c12:34855 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:45:07,200 DEBUG [M:0;b712f9af2c12:34855 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T15:45:07,200 DEBUG [M:0;b712f9af2c12:34855 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T15:45:07,200 INFO [M:0;b712f9af2c12:34855 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.81 KB heapSize=34.10 KB 2024-11-23T15:45:07,219 DEBUG [M:0;b712f9af2c12:34855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bb5591e1a3964216aed928ff2a9b2a6c is 82, key is hbase:meta,,1/info:regioninfo/1732376704817/Put/seqid=0 2024-11-23T15:45:07,222 WARN [M:0;b712f9af2c12:34855 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:07,222 WARN [M:0;b712f9af2c12:34855 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:07,226 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648790401_22 at /127.0.0.1:39748 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:45071:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39748 dst: /127.0.0.1:45071 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:07,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45071 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-23T15:45:07,236 WARN [M:0;b712f9af2c12:34855 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-23T15:45:07,237 INFO [M:0;b712f9af2c12:34855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bb5591e1a3964216aed928ff2a9b2a6c 2024-11-23T15:45:07,265 DEBUG [M:0;b712f9af2c12:34855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6c11785836d64285a98c770b7ce4c467 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732376705802/Put/seqid=0 2024-11-23T15:45:07,267 INFO [RS:0;b712f9af2c12:34219 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T15:45:07,267 INFO [RS:0;b712f9af2c12:34219 {}] regionserver.HRegionServer(1031): Exiting; stopping=b712f9af2c12,34219,1732376701684; zookeeper connection closed. 2024-11-23T15:45:07,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:45:07,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34219-0x10169a8366e0001, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:45:07,267 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@54c9fb03 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@54c9fb03 2024-11-23T15:45:07,268 WARN [M:0;b712f9af2c12:34855 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:07,268 WARN [M:0;b712f9af2c12:34855 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:07,268 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-23T15:45:07,273 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648790401_22 at /127.0.0.1:39768 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:45071:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39768 dst: /127.0.0.1:45071 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:07,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45071 is added to blk_-9223372036854775552_1037 (size=6437) 2024-11-23T15:45:07,278 WARN [M:0;b712f9af2c12:34855 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-23T15:45:07,278 INFO [M:0;b712f9af2c12:34855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.12 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6c11785836d64285a98c770b7ce4c467 2024-11-23T15:45:07,307 DEBUG [M:0;b712f9af2c12:34855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/60abc176788a4877b022e3c25d7eacd1 is 69, key is b712f9af2c12,34219,1732376701684/rs:state/1732376703903/Put/seqid=0 2024-11-23T15:45:07,309 WARN [M:0;b712f9af2c12:34855 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:07,310 WARN [M:0;b712f9af2c12:34855 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-23T15:45:07,312 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648790401_22 at /127.0.0.1:53576 [Receiving block BP-672935730-172.17.0.2-1732376696384:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:46329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53576 dst: /127.0.0.1:46329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:45:07,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-23T15:45:07,317 WARN [M:0;b712f9af2c12:34855 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-23T15:45:07,317 INFO [M:0;b712f9af2c12:34855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/60abc176788a4877b022e3c25d7eacd1 2024-11-23T15:45:07,328 DEBUG [M:0;b712f9af2c12:34855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bb5591e1a3964216aed928ff2a9b2a6c as hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bb5591e1a3964216aed928ff2a9b2a6c 2024-11-23T15:45:07,336 INFO [M:0;b712f9af2c12:34855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bb5591e1a3964216aed928ff2a9b2a6c, entries=8, sequenceid=72, filesize=5.5 K 2024-11-23T15:45:07,338 DEBUG [M:0;b712f9af2c12:34855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6c11785836d64285a98c770b7ce4c467 as hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6c11785836d64285a98c770b7ce4c467 2024-11-23T15:45:07,349 INFO [M:0;b712f9af2c12:34855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6c11785836d64285a98c770b7ce4c467, entries=8, sequenceid=72, filesize=6.3 K 2024-11-23T15:45:07,351 DEBUG [M:0;b712f9af2c12:34855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/60abc176788a4877b022e3c25d7eacd1 as hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/60abc176788a4877b022e3c25d7eacd1 2024-11-23T15:45:07,361 INFO [M:0;b712f9af2c12:34855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/60abc176788a4877b022e3c25d7eacd1, entries=3, sequenceid=72, filesize=5.2 K 2024-11-23T15:45:07,363 INFO [M:0;b712f9af2c12:34855 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=72, compaction requested=false 2024-11-23T15:45:07,367 INFO [M:0;b712f9af2c12:34855 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:45:07,367 DEBUG [M:0;b712f9af2c12:34855 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732376707200Disabling compacts and flushes for region at 1732376707200Disabling writes for close at 1732376707200Obtaining lock to block concurrent updates at 1732376707200Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732376707200Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27450, getHeapSize=34856, getOffHeapSize=0, getCellsCount=85 at 1732376707201 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732376707202 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732376707202Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732376707219 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732376707219Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732376707246 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732376707264 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732376707264Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732376707289 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732376707307 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732376707307Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2201d417: reopening flushed file at 1732376707327 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73039506: reopening flushed file at 1732376707337 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e945480: reopening flushed file at 1732376707349 (+12 ms)Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=72, compaction requested=false at 1732376707363 (+14 ms)Writing region close event to WAL at 1732376707367 (+4 ms)Closed at 1732376707367 2024-11-23T15:45:07,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741825_1011 (size=32653) 2024-11-23T15:45:07,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45071 is added to blk_1073741825_1011 (size=32653) 2024-11-23T15:45:07,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43305 is added to blk_1073741825_1011 (size=32653) 2024-11-23T15:45:07,373 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T15:45:07,373 INFO [M:0;b712f9af2c12:34855 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-23T15:45:07,373 INFO [M:0;b712f9af2c12:34855 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34855 2024-11-23T15:45:07,373 INFO [M:0;b712f9af2c12:34855 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T15:45:07,500 INFO [M:0;b712f9af2c12:34855 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T15:45:07,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:45:07,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x10169a8366e0000, quorum=127.0.0.1:64492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:45:07,544 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b1d6f1c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T15:45:07,546 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fd89fa9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T15:45:07,546 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T15:45:07,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@403b9e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T15:45:07,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5dc716dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/hadoop.log.dir/,STOPPED} 2024-11-23T15:45:07,551 WARN [BP-672935730-172.17.0.2-1732376696384 heartbeating to localhost/127.0.0.1:37303 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T15:45:07,551 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T15:45:07,551 WARN [BP-672935730-172.17.0.2-1732376696384 heartbeating to localhost/127.0.0.1:37303 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-672935730-172.17.0.2-1732376696384 (Datanode Uuid 289915e3-ee6c-43b0-81da-14b3cbffffb5) service to localhost/127.0.0.1:37303 2024-11-23T15:45:07,551 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T15:45:07,552 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/data/data5/current/BP-672935730-172.17.0.2-1732376696384 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T15:45:07,552 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/data/data6/current/BP-672935730-172.17.0.2-1732376696384 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T15:45:07,553 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T15:45:07,554 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@61dd7f65{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T15:45:07,555 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d12fc1e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T15:45:07,555 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T15:45:07,555 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56b22eb1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T15:45:07,555 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@630da62d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/hadoop.log.dir/,STOPPED} 2024-11-23T15:45:07,556 WARN [BP-672935730-172.17.0.2-1732376696384 heartbeating to localhost/127.0.0.1:37303 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T15:45:07,556 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T15:45:07,556 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T15:45:07,556 WARN [BP-672935730-172.17.0.2-1732376696384 heartbeating to localhost/127.0.0.1:37303 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-672935730-172.17.0.2-1732376696384 (Datanode Uuid 83b82bbc-f283-4239-9c47-b4d0e5073c91) service to localhost/127.0.0.1:37303 2024-11-23T15:45:07,557 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/data/data3/current/BP-672935730-172.17.0.2-1732376696384 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T15:45:07,557 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/data/data4/current/BP-672935730-172.17.0.2-1732376696384 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T15:45:07,558 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T15:45:07,560 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1afaf33c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T15:45:07,560 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70d6e3ac{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T15:45:07,560 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T15:45:07,561 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dc7b16b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T15:45:07,561 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f592c6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/hadoop.log.dir/,STOPPED} 2024-11-23T15:45:07,562 WARN [BP-672935730-172.17.0.2-1732376696384 heartbeating to localhost/127.0.0.1:37303 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T15:45:07,562 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T15:45:07,562 WARN [BP-672935730-172.17.0.2-1732376696384 heartbeating to localhost/127.0.0.1:37303 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-672935730-172.17.0.2-1732376696384 (Datanode Uuid 6b35f311-3109-4dc8-8ae5-a1a17f59c404) service to localhost/127.0.0.1:37303 2024-11-23T15:45:07,562 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T15:45:07,562 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/data/data1/current/BP-672935730-172.17.0.2-1732376696384 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T15:45:07,563 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/cluster_dc00375d-5416-8448-2106-d59715ddd12b/data/data2/current/BP-672935730-172.17.0.2-1732376696384 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T15:45:07,563 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T15:45:07,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2a105fbd{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T15:45:07,569 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d863f4f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T15:45:07,569 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T15:45:07,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@189e1f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T15:45:07,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f7d122b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/hadoop.log.dir/,STOPPED} 2024-11-23T15:45:07,581 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T15:45:07,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T15:45:07,621 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=85 (was 156), OpenFileDescriptor=435 (was 393) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=322 (was 309) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7907 (was 8631) 2024-11-23T15:45:07,628 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=85, OpenFileDescriptor=435, MaxFileDescriptor=1048576, SystemLoadAverage=322, ProcessCount=11, AvailableMemoryMB=7907 2024-11-23T15:45:07,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T15:45:07,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/hadoop.log.dir so I do NOT create it in target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb 2024-11-23T15:45:07,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca2e2dc7-7707-5b77-c243-862433037f30/hadoop.tmp.dir so I do NOT create it in target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb 2024-11-23T15:45:07,629 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1, deleteOnExit=true 2024-11-23T15:45:07,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T15:45:07,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/test.cache.data in system properties and HBase conf 2024-11-23T15:45:07,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T15:45:07,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/hadoop.log.dir in system properties and HBase conf 2024-11-23T15:45:07,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T15:45:07,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T15:45:07,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T15:45:07,630 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-23T15:45:07,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T15:45:07,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T15:45:07,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T15:45:07,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T15:45:07,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T15:45:07,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T15:45:07,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T15:45:07,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T15:45:07,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T15:45:07,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/nfs.dump.dir in system properties and HBase conf 2024-11-23T15:45:07,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/java.io.tmpdir in system properties and HBase conf 2024-11-23T15:45:07,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T15:45:07,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T15:45:07,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T15:45:07,898 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T15:45:07,903 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T15:45:07,909 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T15:45:07,909 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T15:45:07,910 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T15:45:07,910 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T15:45:07,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c2635ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/hadoop.log.dir/,AVAILABLE} 2024-11-23T15:45:07,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@180646a5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T15:45:08,026 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1e35858{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/java.io.tmpdir/jetty-localhost-34903-hadoop-hdfs-3_4_1-tests_jar-_-any-1349338066045902818/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T15:45:08,027 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3dacdd96{HTTP/1.1, (http/1.1)}{localhost:34903} 2024-11-23T15:45:08,027 INFO [Time-limited test {}] server.Server(415): Started @13869ms 2024-11-23T15:45:08,277 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T15:45:08,281 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T15:45:08,283 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T15:45:08,283 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T15:45:08,283 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T15:45:08,283 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@208ccfcf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/hadoop.log.dir/,AVAILABLE} 2024-11-23T15:45:08,284 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3133e029{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T15:45:08,405 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4f80cdad{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/java.io.tmpdir/jetty-localhost-46047-hadoop-hdfs-3_4_1-tests_jar-_-any-8647761922500822325/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T15:45:08,406 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@378a2d39{HTTP/1.1, (http/1.1)}{localhost:46047} 2024-11-23T15:45:08,406 INFO [Time-limited test {}] server.Server(415): Started @14247ms 2024-11-23T15:45:08,407 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T15:45:08,446 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T15:45:08,449 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T15:45:08,450 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T15:45:08,450 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T15:45:08,451 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T15:45:08,451 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31c5f8b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/hadoop.log.dir/,AVAILABLE} 2024-11-23T15:45:08,451 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@435873d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T15:45:08,562 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@128bbfcc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/java.io.tmpdir/jetty-localhost-33895-hadoop-hdfs-3_4_1-tests_jar-_-any-13339658420636377527/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T15:45:08,562 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2dba742a{HTTP/1.1, (http/1.1)}{localhost:33895} 2024-11-23T15:45:08,562 INFO [Time-limited test {}] server.Server(415): Started @14403ms 2024-11-23T15:45:08,563 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T15:45:08,603 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T15:45:08,606 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T15:45:08,607 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T15:45:08,607 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T15:45:08,607 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T15:45:08,608 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78313797{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/hadoop.log.dir/,AVAILABLE} 2024-11-23T15:45:08,608 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b1a6d57{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T15:45:08,709 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2347a2a6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/java.io.tmpdir/jetty-localhost-42531-hadoop-hdfs-3_4_1-tests_jar-_-any-6394268859829890805/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T15:45:08,709 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c70ad6a{HTTP/1.1, (http/1.1)}{localhost:42531} 2024-11-23T15:45:08,709 INFO [Time-limited test {}] server.Server(415): Started @14551ms 2024-11-23T15:45:08,711 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T15:45:09,339 WARN [Thread-563 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/data/data1/current/BP-269514553-172.17.0.2-1732376707658/current, will proceed with Du for space computation calculation, 2024-11-23T15:45:09,339 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/data/data2/current/BP-269514553-172.17.0.2-1732376707658/current, will proceed with Du for space computation calculation, 2024-11-23T15:45:09,362 WARN [Thread-504 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T15:45:09,365 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xef6b79d6f26b90a9 with lease ID 0xa5f3ccb36263dfbf: Processing first storage report for DS-01cdfce6-ab58-43d7-ae9f-01b4bcd4000f from datanode DatanodeRegistration(127.0.0.1:34053, datanodeUuid=03222bb6-ac22-4c6a-8b78-4a3f05155367, infoPort=43721, infoSecurePort=0, ipcPort=37101, storageInfo=lv=-57;cid=testClusterID;nsid=345996797;c=1732376707658) 2024-11-23T15:45:09,365 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xef6b79d6f26b90a9 with lease ID 0xa5f3ccb36263dfbf: from storage DS-01cdfce6-ab58-43d7-ae9f-01b4bcd4000f node DatanodeRegistration(127.0.0.1:34053, datanodeUuid=03222bb6-ac22-4c6a-8b78-4a3f05155367, infoPort=43721, infoSecurePort=0, ipcPort=37101, storageInfo=lv=-57;cid=testClusterID;nsid=345996797;c=1732376707658), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T15:45:09,365 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xef6b79d6f26b90a9 with lease ID 0xa5f3ccb36263dfbf: Processing first storage report for DS-f4f96e15-f048-43de-9f4b-2dd9b4882392 from datanode DatanodeRegistration(127.0.0.1:34053, datanodeUuid=03222bb6-ac22-4c6a-8b78-4a3f05155367, infoPort=43721, infoSecurePort=0, ipcPort=37101, storageInfo=lv=-57;cid=testClusterID;nsid=345996797;c=1732376707658) 2024-11-23T15:45:09,365 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xef6b79d6f26b90a9 with lease ID 0xa5f3ccb36263dfbf: from storage DS-f4f96e15-f048-43de-9f4b-2dd9b4882392 node DatanodeRegistration(127.0.0.1:34053, datanodeUuid=03222bb6-ac22-4c6a-8b78-4a3f05155367, infoPort=43721, infoSecurePort=0, ipcPort=37101, storageInfo=lv=-57;cid=testClusterID;nsid=345996797;c=1732376707658), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T15:45:09,595 WARN [Thread-576 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/data/data4/current/BP-269514553-172.17.0.2-1732376707658/current, will proceed with Du for space computation calculation, 2024-11-23T15:45:09,595 WARN [Thread-575 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/data/data3/current/BP-269514553-172.17.0.2-1732376707658/current, will proceed with Du for space computation calculation, 2024-11-23T15:45:09,626 WARN [Thread-527 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T15:45:09,630 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf47b2bf7eb137c3b with lease ID 0xa5f3ccb36263dfc0: Processing first storage report for DS-23cdb414-631f-4934-9d50-b922c33910d6 from datanode DatanodeRegistration(127.0.0.1:33725, datanodeUuid=8a3d0751-f6f1-46ca-b2f7-97c80235db63, infoPort=38073, infoSecurePort=0, ipcPort=41849, storageInfo=lv=-57;cid=testClusterID;nsid=345996797;c=1732376707658) 2024-11-23T15:45:09,631 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf47b2bf7eb137c3b with lease ID 0xa5f3ccb36263dfc0: from storage DS-23cdb414-631f-4934-9d50-b922c33910d6 node DatanodeRegistration(127.0.0.1:33725, datanodeUuid=8a3d0751-f6f1-46ca-b2f7-97c80235db63, infoPort=38073, infoSecurePort=0, ipcPort=41849, storageInfo=lv=-57;cid=testClusterID;nsid=345996797;c=1732376707658), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T15:45:09,631 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf47b2bf7eb137c3b with lease ID 0xa5f3ccb36263dfc0: Processing first storage report for DS-7eada774-9fe7-4da2-92b6-35fda62cf7e7 from datanode DatanodeRegistration(127.0.0.1:33725, datanodeUuid=8a3d0751-f6f1-46ca-b2f7-97c80235db63, infoPort=38073, infoSecurePort=0, ipcPort=41849, storageInfo=lv=-57;cid=testClusterID;nsid=345996797;c=1732376707658) 2024-11-23T15:45:09,631 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf47b2bf7eb137c3b with lease ID 0xa5f3ccb36263dfc0: from storage DS-7eada774-9fe7-4da2-92b6-35fda62cf7e7 node DatanodeRegistration(127.0.0.1:33725, datanodeUuid=8a3d0751-f6f1-46ca-b2f7-97c80235db63, infoPort=38073, infoSecurePort=0, ipcPort=41849, storageInfo=lv=-57;cid=testClusterID;nsid=345996797;c=1732376707658), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T15:45:09,671 WARN [Thread-586 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/data/data5/current/BP-269514553-172.17.0.2-1732376707658/current, will proceed with Du for space computation calculation, 2024-11-23T15:45:09,672 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/data/data6/current/BP-269514553-172.17.0.2-1732376707658/current, will proceed with Du for space computation calculation, 2024-11-23T15:45:09,696 WARN [Thread-549 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T15:45:09,701 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfe2d898712d6de8a with lease ID 0xa5f3ccb36263dfc1: Processing first storage report for DS-09915fe6-cd87-4c80-bcd5-8d5f98464ca4 from datanode DatanodeRegistration(127.0.0.1:38243, datanodeUuid=35f8d034-46dc-453d-abcb-b1541d6ec161, infoPort=41127, infoSecurePort=0, ipcPort=43301, storageInfo=lv=-57;cid=testClusterID;nsid=345996797;c=1732376707658) 2024-11-23T15:45:09,702 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfe2d898712d6de8a with lease ID 0xa5f3ccb36263dfc1: from storage DS-09915fe6-cd87-4c80-bcd5-8d5f98464ca4 node DatanodeRegistration(127.0.0.1:38243, datanodeUuid=35f8d034-46dc-453d-abcb-b1541d6ec161, infoPort=41127, infoSecurePort=0, ipcPort=43301, storageInfo=lv=-57;cid=testClusterID;nsid=345996797;c=1732376707658), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T15:45:09,702 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfe2d898712d6de8a with lease ID 0xa5f3ccb36263dfc1: Processing first storage report for DS-590dd9aa-c577-416e-90fa-efc6b4281ce1 from datanode DatanodeRegistration(127.0.0.1:38243, datanodeUuid=35f8d034-46dc-453d-abcb-b1541d6ec161, infoPort=41127, infoSecurePort=0, ipcPort=43301, storageInfo=lv=-57;cid=testClusterID;nsid=345996797;c=1732376707658) 2024-11-23T15:45:09,702 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfe2d898712d6de8a with lease ID 0xa5f3ccb36263dfc1: from storage DS-590dd9aa-c577-416e-90fa-efc6b4281ce1 node DatanodeRegistration(127.0.0.1:38243, datanodeUuid=35f8d034-46dc-453d-abcb-b1541d6ec161, infoPort=41127, infoSecurePort=0, ipcPort=43301, storageInfo=lv=-57;cid=testClusterID;nsid=345996797;c=1732376707658), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T15:45:09,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T15:45:09,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T15:45:09,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T15:45:09,764 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb 2024-11-23T15:45:09,766 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/zookeeper_0, clientPort=53351, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T15:45:09,771 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53351 2024-11-23T15:45:09,771 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:09,773 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:09,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741825_1001 (size=7) 2024-11-23T15:45:09,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741825_1001 (size=7) 2024-11-23T15:45:09,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741825_1001 (size=7) 2024-11-23T15:45:09,792 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452 with version=8 2024-11-23T15:45:09,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37303/user/jenkins/test-data/586bc8a6-e95a-79e2-f88e-ae2dc972e8e6/hbase-staging 2024-11-23T15:45:09,795 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b712f9af2c12:0 server-side Connection retries=45 2024-11-23T15:45:09,795 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:09,795 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:09,796 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T15:45:09,796 INFO 
[Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:09,796 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T15:45:09,796 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T15:45:09,796 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T15:45:09,797 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46737 2024-11-23T15:45:09,799 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46737 connecting to ZooKeeper ensemble=127.0.0.1:53351 2024-11-23T15:45:09,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:467370x0, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T15:45:09,854 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46737-0x10169a85d4b0000 connected 2024-11-23T15:45:09,916 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:09,918 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:09,920 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:09,920 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452, hbase.cluster.distributed=false 2024-11-23T15:45:09,922 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T15:45:09,924 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46737 2024-11-23T15:45:09,924 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46737 2024-11-23T15:45:09,924 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46737 2024-11-23T15:45:09,925 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46737 2024-11-23T15:45:09,925 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46737 2024-11-23T15:45:09,945 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b712f9af2c12:0 server-side Connection retries=45 
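The RpcExecutor entries above describe bounded call queues (queueClass=LinkedBlockingQueue, maxQueueLength=30) drained by a fixed number of handler threads (handlerCount), which are later started per port. Below is a minimal plain-Java sketch of that bounded-queue-plus-handler-pool pattern; the class and thread names are illustrative and this is not HBase's actual RpcExecutor.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Sketch of the pattern the RpcExecutor lines above describe: a bounded call
// queue (maxQueueLength=30) drained by a fixed number of handler threads
// (handlerCount=3). Names are illustrative, not HBase's API.
public class CallQueueSketch {
  static final class Call {
    final String description;
    Call(String description) { this.description = description; }
  }

  public static void main(String[] args) throws InterruptedException {
    int maxQueueLength = 30;          // mirrors maxQueueLength=30 in the log
    int handlerCount = 3;             // mirrors handlerCount=3 in the log
    BlockingQueue<Call> callQueue = new LinkedBlockingQueue<>(maxQueueLength);

    // Start the handler threads; each blocks on take() until a call arrives.
    for (int i = 0; i < handlerCount; i++) {
      Thread handler = new Thread(() -> {
        try {
          while (!Thread.currentThread().isInterrupted()) {
            Call call = callQueue.take();
            System.out.println(Thread.currentThread().getName() + " handling " + call.description);
          }
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }, "default.FPBQ.Fifo.handler=" + i);
      handler.setDaemon(true);
      handler.start();
    }

    // offer() fails fast when the queue is full, which is how a bounded call
    // queue applies back-pressure to callers.
    boolean accepted = callQueue.offer(new Call("example request"));
    System.out.println("call accepted: " + accepted);
    Thread.sleep(200);   // give a handler a moment to drain the queue
  }
}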
2024-11-23T15:45:09,945 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:09,945 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:09,945 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T15:45:09,945 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:09,945 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T15:45:09,945 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T15:45:09,945 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T15:45:09,946 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42451 2024-11-23T15:45:09,947 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42451 connecting to ZooKeeper ensemble=127.0.0.1:53351 2024-11-23T15:45:09,949 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:09,950 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:09,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:424510x0, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T15:45:09,964 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:424510x0, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:09,964 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42451-0x10169a85d4b0001 connected 2024-11-23T15:45:09,964 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T15:45:09,965 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T15:45:09,966 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T15:45:09,967 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T15:45:09,967 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42451 2024-11-23T15:45:09,967 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42451 2024-11-23T15:45:09,968 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42451 2024-11-23T15:45:09,968 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42451 2024-11-23T15:45:09,968 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42451 2024-11-23T15:45:09,984 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b712f9af2c12:0 server-side Connection retries=45 2024-11-23T15:45:09,984 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:09,984 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:09,985 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T15:45:09,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:09,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T15:45:09,985 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T15:45:09,985 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T15:45:09,985 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42583 2024-11-23T15:45:09,987 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42583 connecting to ZooKeeper ensemble=127.0.0.1:53351 2024-11-23T15:45:09,988 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:09,990 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:09,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425830x0, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T15:45:10,000 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:425830x0, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, 
/hbase/running 2024-11-23T15:45:10,000 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42583-0x10169a85d4b0002 connected 2024-11-23T15:45:10,000 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T15:45:10,001 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T15:45:10,001 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T15:45:10,003 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T15:45:10,003 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42583 2024-11-23T15:45:10,004 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42583 2024-11-23T15:45:10,004 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42583 2024-11-23T15:45:10,005 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42583 2024-11-23T15:45:10,005 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42583 2024-11-23T15:45:10,021 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b712f9af2c12:0 server-side Connection retries=45 2024-11-23T15:45:10,021 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:10,021 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:10,021 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T15:45:10,021 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:45:10,021 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T15:45:10,022 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T15:45:10,022 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T15:45:10,022 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45655 2024-11-23T15:45:10,024 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process 
identifier=regionserver:45655 connecting to ZooKeeper ensemble=127.0.0.1:53351 2024-11-23T15:45:10,025 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:10,027 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:10,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:456550x0, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T15:45:10,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45655-0x10169a85d4b0003 connected 2024-11-23T15:45:10,039 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:10,040 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T15:45:10,040 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T15:45:10,041 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T15:45:10,043 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T15:45:10,045 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45655 2024-11-23T15:45:10,045 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45655 2024-11-23T15:45:10,046 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45655 2024-11-23T15:45:10,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45655 2024-11-23T15:45:10,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45655 2024-11-23T15:45:10,062 DEBUG [M:0;b712f9af2c12:46737 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b712f9af2c12:46737 2024-11-23T15:45:10,063 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b712f9af2c12,46737,1732376709795 2024-11-23T15:45:10,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:10,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:10,072 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:10,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:10,072 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b712f9af2c12,46737,1732376709795 2024-11-23T15:45:10,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T15:45:10,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T15:45:10,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T15:45:10,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,085 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T15:45:10,086 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b712f9af2c12,46737,1732376709795 from backup master directory 2024-11-23T15:45:10,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b712f9af2c12,46737,1732376709795 2024-11-23T15:45:10,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:10,097 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:10,097 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T15:45:10,097 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b712f9af2c12,46737,1732376709795 2024-11-23T15:45:10,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:10,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:45:10,105 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/hbase.id] with ID: 8a48ee27-4f7d-4a73-9417-d11b2495e478 2024-11-23T15:45:10,105 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/.tmp/hbase.id 2024-11-23T15:45:10,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741826_1002 (size=42) 2024-11-23T15:45:10,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741826_1002 (size=42) 2024-11-23T15:45:10,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741826_1002 (size=42) 2024-11-23T15:45:10,123 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/.tmp/hbase.id]:[hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/hbase.id] 2024-11-23T15:45:10,143 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:45:10,143 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T15:45:10,145 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
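The becomeActiveMaster entries above write the cluster ID to a temporary location (.tmp/hbase.id) and then move it to its target location (hbase.id). A hedged sketch of that write-to-temp-then-rename pattern against the Hadoop FileSystem API follows; the root path and namenode address are placeholders, the ID string is the one printed in the log, and this is not HBase's FSUtils code.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the "write to a temporary location, then move into place" pattern
// the cluster ID entries above describe. Paths are placeholders; this is not
// HBase's FSUtils, just the same idea expressed with the FileSystem API.
public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path rootDir = new Path("hdfs://localhost:8020/hbase");   // placeholder root
    Path tmpFile = new Path(rootDir, ".tmp/hbase.id");
    Path idFile = new Path(rootDir, "hbase.id");

    FileSystem fs = rootDir.getFileSystem(conf);

    // 1. Write the ID somewhere readers never look.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write("8a48ee27-4f7d-4a73-9417-d11b2495e478".getBytes(StandardCharsets.UTF_8));
    }

    // 2. Publish it with a rename, so readers see either no hbase.id or a
    //    complete one, never a partially written file.
    if (!fs.rename(tmpFile, idFile)) {
      throw new java.io.IOException("could not move " + tmpFile + " to " + idFile);
    }
  }
}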
2024-11-23T15:45:10,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741827_1003 (size=196) 2024-11-23T15:45:10,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741827_1003 (size=196) 2024-11-23T15:45:10,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741827_1003 (size=196) 2024-11-23T15:45:10,177 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T15:45:10,178 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T15:45:10,178 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T15:45:10,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is 
added to blk_1073741828_1004 (size=1189) 2024-11-23T15:45:10,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741828_1004 (size=1189) 2024-11-23T15:45:10,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741828_1004 (size=1189) 2024-11-23T15:45:10,196 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store 2024-11-23T15:45:10,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741829_1005 (size=34) 2024-11-23T15:45:10,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741829_1005 (size=34) 2024-11-23T15:45:10,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741829_1005 (size=34) 2024-11-23T15:45:10,216 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:45:10,216 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T15:45:10,216 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:45:10,216 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
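The master:store descriptor printed above lists per-family attributes such as VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE. As a rough illustration only, the 'info' family from that printout could be approximated with the public HBase 2.x builder API as below; this mirrors a subset of the logged attributes and is not the code path the master actually uses to create that region.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Approximate reconstruction of the 'info' column family from the master:store
// descriptor printed above, using the public client builder API. Illustrative
// only; several logged attributes (TTL, KEEP_DELETED_CELLS, ...) are omitted.
public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
        .setBlocksize(8 * 1024)                                // BLOCKSIZE => 8KB
        .build();

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))      // table 'master:store'
        .setColumnFamily(info)
        .build();
  }

  public static void main(String[] args) {
    System.out.println(build());
  }
}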
2024-11-23T15:45:10,216 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T15:45:10,216 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:45:10,216 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:45:10,216 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732376710216Disabling compacts and flushes for region at 1732376710216Disabling writes for close at 1732376710216Writing region close event to WAL at 1732376710216Closed at 1732376710216 2024-11-23T15:45:10,218 WARN [master/b712f9af2c12:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/.initializing 2024-11-23T15:45:10,218 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/WALs/b712f9af2c12,46737,1732376709795 2024-11-23T15:45:10,223 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b712f9af2c12%2C46737%2C1732376709795, suffix=, logDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/WALs/b712f9af2c12,46737,1732376709795, archiveDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/oldWALs, maxLogs=10 2024-11-23T15:45:10,224 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b712f9af2c12%2C46737%2C1732376709795.1732376710223 2024-11-23T15:45:10,242 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/WALs/b712f9af2c12,46737,1732376709795/b712f9af2c12%2C46737%2C1732376709795.1732376710223 2024-11-23T15:45:10,246 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41127:41127),(127.0.0.1/127.0.0.1:38073:38073),(127.0.0.1/127.0.0.1:43721:43721)] 2024-11-23T15:45:10,248 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:45:10,248 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:45:10,248 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:10,248 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:10,251 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:10,253 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T15:45:10,254 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:10,254 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:10,254 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:10,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T15:45:10,256 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:10,257 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:45:10,257 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:10,260 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T15:45:10,260 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:10,261 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:45:10,261 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:10,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T15:45:10,263 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:10,263 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:45:10,263 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:10,264 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T15:45:10,264 DEBUG 
[master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:10,265 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:10,266 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:10,266 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:10,267 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T15:45:10,268 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:45:10,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T15:45:10,271 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:45:10,273 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69168908, jitterRate=0.0306970477104187}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T15:45:10,275 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732376710248Initializing all the Stores at 1732376710250 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376710250Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732376710250Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => 
'65536 B (64KB)'} at 1732376710250Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732376710250Cleaning up temporary data from old regions at 1732376710266 (+16 ms)Region opened successfully at 1732376710275 (+9 ms) 2024-11-23T15:45:10,278 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T15:45:10,285 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32878a23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b712f9af2c12/172.17.0.2:0 2024-11-23T15:45:10,286 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T15:45:10,286 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T15:45:10,286 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T15:45:10,286 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T15:45:10,287 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-23T15:45:10,287 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-23T15:45:10,287 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T15:45:10,297 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
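The MasterRegionFlusherAndCompactor entry above reports flushSize=134217728, flushPerChanges=1000000 and flushIntervalMs=900000. The sketch below shows one common way such thresholds are combined, assuming any single trigger crossing its limit is enough to request a flush; that OR semantics is an assumption for illustration and this is not the actual HBase class.

// Hedged sketch of combining the flush thresholds reported above
// (flushSize=134217728 bytes, flushPerChanges=1000000, flushIntervalMs=900000).
// Assumption: any one trigger crossing its limit requests a flush; this is an
// illustration, not the actual MasterRegionFlusherAndCompactor logic.
public class FlushTriggerSketch {
  static final long FLUSH_SIZE = 134_217_728L;     // 128 MB
  static final long FLUSH_PER_CHANGES = 1_000_000L;
  static final long FLUSH_INTERVAL_MS = 900_000L;  // 15 minutes

  static boolean shouldFlush(long bytesInMemstore, long changesSinceFlush, long msSinceFlush) {
    return bytesInMemstore >= FLUSH_SIZE
        || changesSinceFlush >= FLUSH_PER_CHANGES
        || msSinceFlush >= FLUSH_INTERVAL_MS;
  }

  public static void main(String[] args) {
    System.out.println(shouldFlush(200_000_000L, 10, 1_000));   // true: size threshold crossed
    System.out.println(shouldFlush(1_024, 5, 60_000));          // false: nothing crossed
  }
}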
2024-11-23T15:45:10,298 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T15:45:10,316 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T15:45:10,317 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T15:45:10,318 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T15:45:10,333 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T15:45:10,333 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T15:45:10,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T15:45:10,335 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T15:45:10,344 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T15:45:10,349 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T15:45:10,350 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T15:45:10,358 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T15:45:10,360 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T15:45:10,366 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T15:45:10,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:10,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:10,375 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:10,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:10,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,376 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b712f9af2c12,46737,1732376709795, sessionid=0x10169a85d4b0000, setting cluster-up flag (Was=false) 2024-11-23T15:45:10,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,416 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T15:45:10,418 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b712f9af2c12,46737,1732376709795 2024-11-23T15:45:10,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,458 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T15:45:10,459 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b712f9af2c12,46737,1732376709795 2024-11-23T15:45:10,460 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T15:45:10,463 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T15:45:10,463 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T15:45:10,464 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-23T15:45:10,464 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b712f9af2c12,46737,1732376709795 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T15:45:10,465 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b712f9af2c12:0, corePoolSize=5, maxPoolSize=5 2024-11-23T15:45:10,465 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b712f9af2c12:0, corePoolSize=5, maxPoolSize=5 2024-11-23T15:45:10,465 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b712f9af2c12:0, corePoolSize=5, maxPoolSize=5 2024-11-23T15:45:10,465 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b712f9af2c12:0, corePoolSize=5, maxPoolSize=5 2024-11-23T15:45:10,465 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b712f9af2c12:0, corePoolSize=10, maxPoolSize=10 2024-11-23T15:45:10,466 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,466 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b712f9af2c12:0, corePoolSize=2, maxPoolSize=2 2024-11-23T15:45:10,466 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,467 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732376740467 2024-11-23T15:45:10,468 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T15:45:10,468 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T15:45:10,468 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T15:45:10,468 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T15:45:10,468 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T15:45:10,468 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T15:45:10,468 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,468 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T15:45:10,468 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T15:45:10,469 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T15:45:10,469 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T15:45:10,469 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T15:45:10,469 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T15:45:10,469 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T15:45:10,469 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b712f9af2c12:0:becomeActiveMaster-HFileCleaner.large.0-1732376710469,5,FailOnTimeoutGroup] 2024-11-23T15:45:10,469 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b712f9af2c12:0:becomeActiveMaster-HFileCleaner.small.0-1732376710469,5,FailOnTimeoutGroup] 2024-11-23T15:45:10,469 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,470 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T15:45:10,470 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,470 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-23T15:45:10,470 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:10,470 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T15:45:10,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741831_1007 (size=1321) 2024-11-23T15:45:10,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741831_1007 (size=1321) 2024-11-23T15:45:10,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741831_1007 (size=1321) 2024-11-23T15:45:10,484 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T15:45:10,485 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452 2024-11-23T15:45:10,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741832_1008 (size=32) 2024-11-23T15:45:10,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741832_1008 (size=32) 2024-11-23T15:45:10,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741832_1008 (size=32) 2024-11-23T15:45:10,499 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:45:10,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T15:45:10,504 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T15:45:10,504 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:10,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:10,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T15:45:10,507 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T15:45:10,507 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:10,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:10,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T15:45:10,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T15:45:10,511 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:10,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:10,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T15:45:10,515 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T15:45:10,515 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:10,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:10,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T15:45:10,519 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740 2024-11-23T15:45:10,520 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740 2024-11-23T15:45:10,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T15:45:10,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T15:45:10,523 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T15:45:10,525 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T15:45:10,528 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:45:10,529 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73956102, jitterRate=0.10203179717063904}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T15:45:10,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732376710499Initializing all the Stores at 1732376710501 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376710501Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376710501Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732376710501Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376710501Cleaning up temporary data from old regions at 1732376710522 (+21 ms)Region opened successfully at 1732376710530 (+8 ms) 2024-11-23T15:45:10,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T15:45:10,530 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T15:45:10,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T15:45:10,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T15:45:10,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T15:45:10,531 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T15:45:10,531 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732376710530Disabling compacts and flushes for region at 1732376710530Disabling writes for close at 1732376710530Writing region close event to WAL at 1732376710531 (+1 ms)Closed at 1732376710531 2024-11-23T15:45:10,533 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T15:45:10,533 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T15:45:10,533 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T15:45:10,535 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T15:45:10,537 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T15:45:10,550 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.HRegionServer(746): ClusterId : 8a48ee27-4f7d-4a73-9417-d11b2495e478 2024-11-23T15:45:10,550 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(746): ClusterId : 8a48ee27-4f7d-4a73-9417-d11b2495e478 2024-11-23T15:45:10,550 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T15:45:10,550 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 
2024-11-23T15:45:10,572 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(746): ClusterId : 8a48ee27-4f7d-4a73-9417-d11b2495e478 2024-11-23T15:45:10,572 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T15:45:10,573 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T15:45:10,573 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T15:45:10,573 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T15:45:10,573 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T15:45:10,591 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T15:45:10,591 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T15:45:10,592 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T15:45:10,592 DEBUG [RS:0;b712f9af2c12:42451 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45f9f2d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b712f9af2c12/172.17.0.2:0 2024-11-23T15:45:10,599 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T15:45:10,600 DEBUG [RS:1;b712f9af2c12:42583 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@406e479, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b712f9af2c12/172.17.0.2:0 2024-11-23T15:45:10,602 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T15:45:10,603 DEBUG [RS:2;b712f9af2c12:45655 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e843c9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b712f9af2c12/172.17.0.2:0 2024-11-23T15:45:10,609 DEBUG [RS:0;b712f9af2c12:42451 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b712f9af2c12:42451 2024-11-23T15:45:10,609 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T15:45:10,609 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T15:45:10,609 DEBUG [RS:0;b712f9af2c12:42451 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T15:45:10,614 DEBUG [RS:1;b712f9af2c12:42583 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b712f9af2c12:42583 2024-11-23T15:45:10,615 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T15:45:10,615 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T15:45:10,615 DEBUG [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T15:45:10,616 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.HRegionServer(2659): reportForDuty to master=b712f9af2c12,46737,1732376709795 with port=42451, startcode=1732376709944 2024-11-23T15:45:10,616 DEBUG [RS:0;b712f9af2c12:42451 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T15:45:10,617 DEBUG [RS:2;b712f9af2c12:45655 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;b712f9af2c12:45655 2024-11-23T15:45:10,617 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T15:45:10,618 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T15:45:10,618 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(2659): reportForDuty to master=b712f9af2c12,46737,1732376709795 with port=42583, startcode=1732376709984 2024-11-23T15:45:10,618 DEBUG [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T15:45:10,618 DEBUG [RS:1;b712f9af2c12:42583 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T15:45:10,618 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(2659): reportForDuty to master=b712f9af2c12,46737,1732376709795 with port=45655, startcode=1732376710020 2024-11-23T15:45:10,619 DEBUG [RS:2;b712f9af2c12:45655 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T15:45:10,621 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60815, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T15:45:10,621 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46737 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b712f9af2c12,42451,1732376709944 2024-11-23T15:45:10,622 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46737 {}] master.ServerManager(517): Registering regionserver=b712f9af2c12,42451,1732376709944 2024-11-23T15:45:10,622 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38309, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T15:45:10,622 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60215, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T15:45:10,624 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46737 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b712f9af2c12,45655,1732376710020 2024-11-23T15:45:10,625 DEBUG [RS:0;b712f9af2c12:42451 {}] 
regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452 2024-11-23T15:45:10,625 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46737 {}] master.ServerManager(517): Registering regionserver=b712f9af2c12,45655,1732376710020 2024-11-23T15:45:10,625 DEBUG [RS:0;b712f9af2c12:42451 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37709 2024-11-23T15:45:10,625 DEBUG [RS:0;b712f9af2c12:42451 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T15:45:10,627 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46737 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b712f9af2c12,42583,1732376709984 2024-11-23T15:45:10,627 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46737 {}] master.ServerManager(517): Registering regionserver=b712f9af2c12,42583,1732376709984 2024-11-23T15:45:10,627 DEBUG [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452 2024-11-23T15:45:10,627 DEBUG [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37709 2024-11-23T15:45:10,627 DEBUG [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T15:45:10,629 DEBUG [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452 2024-11-23T15:45:10,630 DEBUG [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37709 2024-11-23T15:45:10,630 DEBUG [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T15:45:10,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T15:45:10,663 DEBUG [RS:0;b712f9af2c12:42451 {}] zookeeper.ZKUtil(111): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b712f9af2c12,42451,1732376709944 2024-11-23T15:45:10,663 WARN [RS:0;b712f9af2c12:42451 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T15:45:10,663 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b712f9af2c12,42451,1732376709944] 2024-11-23T15:45:10,663 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b712f9af2c12,45655,1732376710020] 2024-11-23T15:45:10,663 INFO [RS:0;b712f9af2c12:42451 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T15:45:10,663 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b712f9af2c12,42583,1732376709984] 2024-11-23T15:45:10,663 DEBUG [RS:0;b712f9af2c12:42451 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/WALs/b712f9af2c12,42451,1732376709944 2024-11-23T15:45:10,664 DEBUG [RS:1;b712f9af2c12:42583 {}] zookeeper.ZKUtil(111): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b712f9af2c12,42583,1732376709984 2024-11-23T15:45:10,664 WARN [RS:1;b712f9af2c12:42583 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T15:45:10,665 INFO [RS:1;b712f9af2c12:42583 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T15:45:10,665 DEBUG [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/WALs/b712f9af2c12,42583,1732376709984 2024-11-23T15:45:10,665 DEBUG [RS:2;b712f9af2c12:45655 {}] zookeeper.ZKUtil(111): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b712f9af2c12,45655,1732376710020 2024-11-23T15:45:10,665 WARN [RS:2;b712f9af2c12:45655 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T15:45:10,665 INFO [RS:2;b712f9af2c12:45655 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T15:45:10,665 DEBUG [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/WALs/b712f9af2c12,45655,1732376710020 2024-11-23T15:45:10,669 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T15:45:10,673 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T15:45:10,673 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T15:45:10,674 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T15:45:10,675 INFO [RS:0;b712f9af2c12:42451 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T15:45:10,675 INFO [RS:0;b712f9af2c12:42451 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,676 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T15:45:10,678 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T15:45:10,679 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T15:45:10,679 INFO [RS:1;b712f9af2c12:42583 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T15:45:10,680 INFO [RS:1;b712f9af2c12:42583 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,680 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T15:45:10,680 INFO [RS:2;b712f9af2c12:45655 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T15:45:10,680 INFO [RS:2;b712f9af2c12:45655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,682 INFO [RS:0;b712f9af2c12:42451 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T15:45:10,683 INFO [RS:1;b712f9af2c12:42583 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T15:45:10,683 INFO [RS:0;b712f9af2c12:42451 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-23T15:45:10,683 INFO [RS:1;b712f9af2c12:42583 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,683 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,683 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,683 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,683 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,683 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,683 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,683 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,683 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,683 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,683 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b712f9af2c12:0, corePoolSize=2, maxPoolSize=2 2024-11-23T15:45:10,683 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,683 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,683 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b712f9af2c12:0, corePoolSize=2, maxPoolSize=2 2024-11-23T15:45:10,683 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,683 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,684 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,684 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b712f9af2c12:0, corePoolSize=1, 
maxPoolSize=1 2024-11-23T15:45:10,684 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,684 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,684 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,684 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,684 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b712f9af2c12:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:45:10,684 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,684 DEBUG [RS:0;b712f9af2c12:42451 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:45:10,684 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,684 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,684 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b712f9af2c12:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:45:10,684 DEBUG [RS:1;b712f9af2c12:42583 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:45:10,686 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T15:45:10,687 WARN [b712f9af2c12:46737 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-23T15:45:10,687 INFO [RS:2;b712f9af2c12:45655 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T15:45:10,687 INFO [RS:2;b712f9af2c12:45655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b712f9af2c12:0, corePoolSize=2, maxPoolSize=2 2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b712f9af2c12:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b712f9af2c12:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:45:10,688 DEBUG [RS:2;b712f9af2c12:45655 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:45:10,701 INFO [RS:0;b712f9af2c12:42451 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:1;b712f9af2c12:42583 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:2;b712f9af2c12:45655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:0;b712f9af2c12:42451 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-11-23T15:45:10,701 INFO [RS:1;b712f9af2c12:42583 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:2;b712f9af2c12:45655 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:0;b712f9af2c12:42451 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:1;b712f9af2c12:42583 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:2;b712f9af2c12:45655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:0;b712f9af2c12:42451 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:1;b712f9af2c12:42583 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:2;b712f9af2c12:45655 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:0;b712f9af2c12:42451 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:1;b712f9af2c12:42583 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:2;b712f9af2c12:45655 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:1;b712f9af2c12:42583 {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,42583,1732376709984-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:0;b712f9af2c12:42451 {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,42451,1732376709944-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T15:45:10,701 INFO [RS:2;b712f9af2c12:45655 {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,45655,1732376710020-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T15:45:10,720 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T15:45:10,720 INFO [RS:0;b712f9af2c12:42451 {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,42451,1732376709944-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,720 INFO [RS:0;b712f9af2c12:42451 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T15:45:10,720 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.Replication(171): b712f9af2c12,42451,1732376709944 started 2024-11-23T15:45:10,723 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T15:45:10,723 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T15:45:10,724 INFO [RS:2;b712f9af2c12:45655 {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,45655,1732376710020-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,724 INFO [RS:1;b712f9af2c12:42583 {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,42583,1732376709984-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,724 INFO [RS:1;b712f9af2c12:42583 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,724 INFO [RS:2;b712f9af2c12:45655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,724 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.Replication(171): b712f9af2c12,42583,1732376709984 started 2024-11-23T15:45:10,724 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.Replication(171): b712f9af2c12,45655,1732376710020 started 2024-11-23T15:45:10,743 INFO [RS:0;b712f9af2c12:42451 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,744 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.HRegionServer(1482): Serving as b712f9af2c12,42451,1732376709944, RpcServer on b712f9af2c12/172.17.0.2:42451, sessionid=0x10169a85d4b0001 2024-11-23T15:45:10,744 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T15:45:10,744 DEBUG [RS:0;b712f9af2c12:42451 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b712f9af2c12,42451,1732376709944 2024-11-23T15:45:10,744 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b712f9af2c12,42451,1732376709944' 2024-11-23T15:45:10,744 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T15:45:10,745 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T15:45:10,746 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T15:45:10,746 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T15:45:10,746 INFO [RS:1;b712f9af2c12:42583 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:10,746 INFO [RS:2;b712f9af2c12:45655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T15:45:10,746 DEBUG [RS:0;b712f9af2c12:42451 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b712f9af2c12,42451,1732376709944 2024-11-23T15:45:10,746 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b712f9af2c12,42451,1732376709944' 2024-11-23T15:45:10,746 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(1482): Serving as b712f9af2c12,42583,1732376709984, RpcServer on b712f9af2c12/172.17.0.2:42583, sessionid=0x10169a85d4b0002 2024-11-23T15:45:10,746 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(1482): Serving as b712f9af2c12,45655,1732376710020, RpcServer on b712f9af2c12/172.17.0.2:45655, sessionid=0x10169a85d4b0003 2024-11-23T15:45:10,746 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T15:45:10,747 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T15:45:10,747 DEBUG [RS:2;b712f9af2c12:45655 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b712f9af2c12,45655,1732376710020 2024-11-23T15:45:10,747 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b712f9af2c12,45655,1732376710020' 2024-11-23T15:45:10,747 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T15:45:10,747 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T15:45:10,747 DEBUG [RS:1;b712f9af2c12:42583 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b712f9af2c12,42583,1732376709984 2024-11-23T15:45:10,747 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b712f9af2c12,42583,1732376709984' 2024-11-23T15:45:10,747 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T15:45:10,748 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T15:45:10,748 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T15:45:10,748 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T15:45:10,748 DEBUG [RS:1;b712f9af2c12:42583 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b712f9af2c12,42583,1732376709984 2024-11-23T15:45:10,748 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b712f9af2c12,42583,1732376709984' 2024-11-23T15:45:10,748 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T15:45:10,749 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T15:45:10,749 DEBUG [RS:1;b712f9af2c12:42583 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T15:45:10,749 INFO [RS:1;b712f9af2c12:42583 {}] 
quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T15:45:10,749 INFO [RS:1;b712f9af2c12:42583 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T15:45:10,751 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T15:45:10,751 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T15:45:10,752 DEBUG [RS:0;b712f9af2c12:42451 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T15:45:10,752 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T15:45:10,752 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T15:45:10,752 INFO [RS:0;b712f9af2c12:42451 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T15:45:10,752 DEBUG [RS:2;b712f9af2c12:45655 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b712f9af2c12,45655,1732376710020 2024-11-23T15:45:10,752 INFO [RS:0;b712f9af2c12:42451 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T15:45:10,752 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b712f9af2c12,45655,1732376710020' 2024-11-23T15:45:10,752 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T15:45:10,752 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T15:45:10,755 DEBUG [RS:2;b712f9af2c12:45655 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T15:45:10,755 INFO [RS:2;b712f9af2c12:45655 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T15:45:10,755 INFO [RS:2;b712f9af2c12:45655 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-23T15:45:10,851 INFO [RS:1;b712f9af2c12:42583 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b712f9af2c12%2C42583%2C1732376709984, suffix=, logDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/WALs/b712f9af2c12,42583,1732376709984, archiveDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/oldWALs, maxLogs=32 2024-11-23T15:45:10,854 INFO [RS:1;b712f9af2c12:42583 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b712f9af2c12%2C42583%2C1732376709984.1732376710853 2024-11-23T15:45:10,855 INFO [RS:0;b712f9af2c12:42451 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b712f9af2c12%2C42451%2C1732376709944, suffix=, logDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/WALs/b712f9af2c12,42451,1732376709944, archiveDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/oldWALs, maxLogs=32 2024-11-23T15:45:10,857 INFO [RS:0;b712f9af2c12:42451 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b712f9af2c12%2C42451%2C1732376709944.1732376710857 2024-11-23T15:45:10,858 INFO [RS:2;b712f9af2c12:45655 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b712f9af2c12%2C45655%2C1732376710020, suffix=, logDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/WALs/b712f9af2c12,45655,1732376710020, archiveDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/oldWALs, maxLogs=32 2024-11-23T15:45:10,859 INFO [RS:2;b712f9af2c12:45655 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b712f9af2c12%2C45655%2C1732376710020.1732376710859 2024-11-23T15:45:10,864 INFO [RS:1;b712f9af2c12:42583 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/WALs/b712f9af2c12,42583,1732376709984/b712f9af2c12%2C42583%2C1732376709984.1732376710853 2024-11-23T15:45:10,867 DEBUG [RS:1;b712f9af2c12:42583 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41127:41127),(127.0.0.1/127.0.0.1:38073:38073),(127.0.0.1/127.0.0.1:43721:43721)] 2024-11-23T15:45:10,867 INFO [RS:0;b712f9af2c12:42451 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/WALs/b712f9af2c12,42451,1732376709944/b712f9af2c12%2C42451%2C1732376709944.1732376710857 2024-11-23T15:45:10,875 DEBUG [RS:0;b712f9af2c12:42451 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41127:41127),(127.0.0.1/127.0.0.1:38073:38073),(127.0.0.1/127.0.0.1:43721:43721)] 2024-11-23T15:45:10,877 INFO [RS:2;b712f9af2c12:45655 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/WALs/b712f9af2c12,45655,1732376710020/b712f9af2c12%2C45655%2C1732376710020.1732376710859 2024-11-23T15:45:10,881 DEBUG [RS:2;b712f9af2c12:45655 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43721:43721),(127.0.0.1/127.0.0.1:41127:41127),(127.0.0.1/127.0.0.1:38073:38073)] 2024-11-23T15:45:10,938 DEBUG [b712f9af2c12:46737 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-23T15:45:10,938 DEBUG [b712f9af2c12:46737 {}] balancer.BalancerClusterState(204): Hosts are {b712f9af2c12=0} racks are {/default-rack=0} 2024-11-23T15:45:10,942 DEBUG [b712f9af2c12:46737 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T15:45:10,942 DEBUG [b712f9af2c12:46737 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T15:45:10,942 DEBUG [b712f9af2c12:46737 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T15:45:10,942 DEBUG [b712f9af2c12:46737 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T15:45:10,942 DEBUG [b712f9af2c12:46737 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T15:45:10,942 DEBUG [b712f9af2c12:46737 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T15:45:10,942 INFO [b712f9af2c12:46737 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T15:45:10,942 INFO [b712f9af2c12:46737 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T15:45:10,942 INFO [b712f9af2c12:46737 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T15:45:10,942 DEBUG [b712f9af2c12:46737 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T15:45:10,943 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b712f9af2c12,45655,1732376710020 2024-11-23T15:45:10,944 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b712f9af2c12,45655,1732376710020, state=OPENING 2024-11-23T15:45:10,958 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T15:45:10,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:10,967 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T15:45:10,967 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:10,967 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:10,967 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:10,967 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, 
hasLock=false; OpenRegionProcedure 1588230740, server=b712f9af2c12,45655,1732376710020}] 2024-11-23T15:45:10,967 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:11,121 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T15:45:11,122 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58571, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T15:45:11,127 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T15:45:11,128 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T15:45:11,131 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b712f9af2c12%2C45655%2C1732376710020.meta, suffix=.meta, logDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/WALs/b712f9af2c12,45655,1732376710020, archiveDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/oldWALs, maxLogs=32 2024-11-23T15:45:11,132 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b712f9af2c12%2C45655%2C1732376710020.meta.1732376711132.meta 2024-11-23T15:45:11,141 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/WALs/b712f9af2c12,45655,1732376710020/b712f9af2c12%2C45655%2C1732376710020.meta.1732376711132.meta 2024-11-23T15:45:11,142 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38073:38073),(127.0.0.1/127.0.0.1:41127:41127),(127.0.0.1/127.0.0.1:43721:43721)] 2024-11-23T15:45:11,143 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:45:11,144 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T15:45:11,144 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T15:45:11,144 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-23T15:45:11,144 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T15:45:11,144 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:45:11,144 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T15:45:11,145 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T15:45:11,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T15:45:11,147 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T15:45:11,147 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:11,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:11,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T15:45:11,149 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T15:45:11,149 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:11,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:11,150 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T15:45:11,151 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T15:45:11,151 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:11,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:45:11,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T15:45:11,152 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T15:45:11,152 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:11,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-23T15:45:11,153 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T15:45:11,154 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740 2024-11-23T15:45:11,156 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740 2024-11-23T15:45:11,157 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T15:45:11,158 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T15:45:11,158 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T15:45:11,160 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T15:45:11,162 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64692331, jitterRate=-0.03600914776325226}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T15:45:11,162 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T15:45:11,164 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732376711145Writing region info on filesystem at 1732376711145Initializing all the Stores at 1732376711146 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376711146Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376711146Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732376711146Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732376711146Cleaning up temporary data from old regions at 1732376711158 (+12 ms)Running coprocessor post-open hooks at 1732376711162 (+4 ms)Region opened successfully at 1732376711163 (+1 ms) 2024-11-23T15:45:11,165 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732376711121 2024-11-23T15:45:11,169 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T15:45:11,169 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T15:45:11,170 DEBUG [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T15:45:11,170 INFO [RS_OPEN_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T15:45:11,171 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b712f9af2c12,45655,1732376710020 2024-11-23T15:45:11,171 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T15:45:11,171 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T15:45:11,172 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T15:45:11,172 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-23T15:45:11,173 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b712f9af2c12,45655,1732376710020, state=OPEN 2024-11-23T15:45:11,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T15:45:11,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T15:45:11,224 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T15:45:11,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T15:45:11,224 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b712f9af2c12,45655,1732376710020 2024-11-23T15:45:11,224 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:11,224 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:11,224 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:11,224 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:45:11,229 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T15:45:11,229 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b712f9af2c12,45655,1732376710020 in 257 msec 2024-11-23T15:45:11,232 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T15:45:11,232 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 696 msec 2024-11-23T15:45:11,233 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T15:45:11,234 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T15:45:11,235 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T15:45:11,235 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b712f9af2c12,45655,1732376710020, seqNum=-1] 2024-11-23T15:45:11,236 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:45:11,238 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57311, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:45:11,247 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 784 msec 2024-11-23T15:45:11,248 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732376711248, completionTime=-1 2024-11-23T15:45:11,248 INFO 
[master/b712f9af2c12:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-23T15:45:11,248 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T15:45:11,250 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-23T15:45:11,250 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732376771250 2024-11-23T15:45:11,250 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732376831250 2024-11-23T15:45:11,250 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-23T15:45:11,251 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-23T15:45:11,251 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,46737,1732376709795-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:11,251 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,46737,1732376709795-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:11,251 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,46737,1732376709795-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:11,251 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b712f9af2c12:46737, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:11,252 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:11,252 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:11,254 DEBUG [master/b712f9af2c12:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T15:45:11,258 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.161sec 2024-11-23T15:45:11,259 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T15:45:11,259 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T15:45:11,259 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 
2024-11-23T15:45:11,259 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T15:45:11,259 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T15:45:11,259 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,46737,1732376709795-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T15:45:11,259 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,46737,1732376709795-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T15:45:11,262 DEBUG [master/b712f9af2c12:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T15:45:11,262 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T15:45:11,262 INFO [master/b712f9af2c12:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b712f9af2c12,46737,1732376709795-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:45:11,352 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57fcd476, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:45:11,352 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b712f9af2c12,46737,-1 for getting cluster id 2024-11-23T15:45:11,352 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T15:45:11,354 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8a48ee27-4f7d-4a73-9417-d11b2495e478' 2024-11-23T15:45:11,354 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T15:45:11,355 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8a48ee27-4f7d-4a73-9417-d11b2495e478" 2024-11-23T15:45:11,355 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2df66568, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:45:11,355 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b712f9af2c12,46737,-1] 2024-11-23T15:45:11,355 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T15:45:11,357 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:11,359 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50268, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T15:45:11,360 
DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78f18ed4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:45:11,361 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T15:45:11,362 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b712f9af2c12,45655,1732376710020, seqNum=-1] 2024-11-23T15:45:11,363 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:45:11,365 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44298, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:45:11,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b712f9af2c12,46737,1732376709795 2024-11-23T15:45:11,369 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T15:45:11,370 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is b712f9af2c12,46737,1732376709795 2024-11-23T15:45:11,371 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7d193a4e 2024-11-23T15:45:11,371 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T15:45:11,373 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50278, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T15:45:11,374 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T15:45:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46737 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-23T15:45:11,379 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T15:45:11,379 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:11,379 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 
2024-11-23T15:45:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T15:45:11,381 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T15:45:11,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741837_1013 (size=392) 2024-11-23T15:45:11,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741837_1013 (size=392) 2024-11-23T15:45:11,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741837_1013 (size=392) 2024-11-23T15:45:11,402 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6c1a5b4f1501e6decd41434972e59155, NAME => 'TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452 2024-11-23T15:45:11,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741838_1014 (size=51) 2024-11-23T15:45:11,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741838_1014 (size=51) 2024-11-23T15:45:11,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741838_1014 (size=51) 2024-11-23T15:45:11,414 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:45:11,414 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 6c1a5b4f1501e6decd41434972e59155, disabling compactions & flushes 2024-11-23T15:45:11,414 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 2024-11-23T15:45:11,414 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 2024-11-23T15:45:11,414 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 
after waiting 0 ms 2024-11-23T15:45:11,414 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 2024-11-23T15:45:11,414 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 2024-11-23T15:45:11,414 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6c1a5b4f1501e6decd41434972e59155: Waiting for close lock at 1732376711414Disabling compacts and flushes for region at 1732376711414Disabling writes for close at 1732376711414Writing region close event to WAL at 1732376711414Closed at 1732376711414 2024-11-23T15:45:11,417 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T15:45:11,417 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1732376711417"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732376711417"}]},"ts":"1732376711417"} 2024-11-23T15:45:11,420 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-23T15:45:11,422 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T15:45:11,423 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732376711422"}]},"ts":"1732376711422"} 2024-11-23T15:45:11,427 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-23T15:45:11,427 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b712f9af2c12=0} racks are {/default-rack=0} 2024-11-23T15:45:11,428 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T15:45:11,428 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T15:45:11,428 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T15:45:11,428 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T15:45:11,428 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T15:45:11,428 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T15:45:11,428 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T15:45:11,428 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T15:45:11,428 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T15:45:11,428 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T15:45:11,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, 
region=6c1a5b4f1501e6decd41434972e59155, ASSIGN}] 2024-11-23T15:45:11,431 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6c1a5b4f1501e6decd41434972e59155, ASSIGN 2024-11-23T15:45:11,432 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6c1a5b4f1501e6decd41434972e59155, ASSIGN; state=OFFLINE, location=b712f9af2c12,42583,1732376709984; forceNewPlan=false, retain=false 2024-11-23T15:45:11,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T15:45:11,583 INFO [b712f9af2c12:46737 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-23T15:45:11,583 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6c1a5b4f1501e6decd41434972e59155, regionState=OPENING, regionLocation=b712f9af2c12,42583,1732376709984 2024-11-23T15:45:11,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6c1a5b4f1501e6decd41434972e59155, ASSIGN because future has completed 2024-11-23T15:45:11,587 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6c1a5b4f1501e6decd41434972e59155, server=b712f9af2c12,42583,1732376709984}] 2024-11-23T15:45:11,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T15:45:11,741 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T15:45:11,743 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55747, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T15:45:11,749 INFO [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 
2024-11-23T15:45:11,749 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6c1a5b4f1501e6decd41434972e59155, NAME => 'TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:45:11,749 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 6c1a5b4f1501e6decd41434972e59155 2024-11-23T15:45:11,749 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:45:11,750 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6c1a5b4f1501e6decd41434972e59155 2024-11-23T15:45:11,750 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6c1a5b4f1501e6decd41434972e59155 2024-11-23T15:45:11,755 INFO [StoreOpener-6c1a5b4f1501e6decd41434972e59155-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6c1a5b4f1501e6decd41434972e59155 2024-11-23T15:45:11,757 INFO [StoreOpener-6c1a5b4f1501e6decd41434972e59155-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6c1a5b4f1501e6decd41434972e59155 columnFamilyName cf 2024-11-23T15:45:11,757 DEBUG [StoreOpener-6c1a5b4f1501e6decd41434972e59155-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:45:11,757 INFO [StoreOpener-6c1a5b4f1501e6decd41434972e59155-1 {}] regionserver.HStore(327): Store=6c1a5b4f1501e6decd41434972e59155/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:45:11,758 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6c1a5b4f1501e6decd41434972e59155 2024-11-23T15:45:11,759 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/default/TestHBaseWalOnEC/6c1a5b4f1501e6decd41434972e59155 2024-11-23T15:45:11,759 DEBUG 
[RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/default/TestHBaseWalOnEC/6c1a5b4f1501e6decd41434972e59155 2024-11-23T15:45:11,760 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6c1a5b4f1501e6decd41434972e59155 2024-11-23T15:45:11,760 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6c1a5b4f1501e6decd41434972e59155 2024-11-23T15:45:11,762 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6c1a5b4f1501e6decd41434972e59155 2024-11-23T15:45:11,765 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/default/TestHBaseWalOnEC/6c1a5b4f1501e6decd41434972e59155/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:45:11,765 INFO [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6c1a5b4f1501e6decd41434972e59155; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61880881, jitterRate=-0.07790301740169525}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T15:45:11,766 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6c1a5b4f1501e6decd41434972e59155 2024-11-23T15:45:11,767 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6c1a5b4f1501e6decd41434972e59155: Running coprocessor pre-open hook at 1732376711750Writing region info on filesystem at 1732376711750Initializing all the Stores at 1732376711753 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732376711754 (+1 ms)Cleaning up temporary data from old regions at 1732376711760 (+6 ms)Running coprocessor post-open hooks at 1732376711766 (+6 ms)Region opened successfully at 1732376711767 (+1 ms) 2024-11-23T15:45:11,768 INFO [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155., pid=6, masterSystemTime=1732376711741 2024-11-23T15:45:11,774 DEBUG [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 2024-11-23T15:45:11,774 INFO [RS_OPEN_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 
2024-11-23T15:45:11,776 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6c1a5b4f1501e6decd41434972e59155, regionState=OPEN, openSeqNum=2, regionLocation=b712f9af2c12,42583,1732376709984 2024-11-23T15:45:11,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6c1a5b4f1501e6decd41434972e59155, server=b712f9af2c12,42583,1732376709984 because future has completed 2024-11-23T15:45:11,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T15:45:11,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6c1a5b4f1501e6decd41434972e59155, server=b712f9af2c12,42583,1732376709984 in 196 msec 2024-11-23T15:45:11,794 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T15:45:11,795 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=6c1a5b4f1501e6decd41434972e59155, ASSIGN in 362 msec 2024-11-23T15:45:11,796 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T15:45:11,797 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732376711796"}]},"ts":"1732376711796"} 2024-11-23T15:45:11,800 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-23T15:45:11,803 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T15:45:11,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 429 msec 2024-11-23T15:45:12,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T15:45:12,010 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-23T15:45:12,010 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-23T15:45:12,011 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T15:45:12,014 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-23T15:45:12,014 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T15:45:12,014 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
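The entries above complete the create-table path: CreateTableProcedure (pid=4) assigns the single region of TestHBaseWalOnEC (pids 5 and 6), marks the table ENABLED in hbase:meta, and the test then waits until all regions report as assigned. A minimal client-side sketch that would drive this sequence is shown below; it is illustrative only (not the TestHBaseWalOnEC source), runs inside a test method, and assumes the synchronous Admin API plus a Configuration named conf and an HBaseTestingUtil instance named util:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch, not the test source: create 'TestHBaseWalOnEC' with one family 'cf'
    // (VERSIONS=1, as shown in the region-open journal above), then wait for assignment the
    // same way the HBaseTestingUtil(3046/3120) messages report it.
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build())
          .build());                              // master runs CreateTableProcedure (pid=4)
      util.waitUntilAllRegionsAssigned(tn);       // blocks until the region is OPEN in meta
    }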
2024-11-23T15:45:12,019 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155., hostname=b712f9af2c12,42583,1732376709984, seqNum=2] 2024-11-23T15:45:12,019 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:45:12,022 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56834, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:45:12,027 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46737 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-23T15:45:12,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46737 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-23T15:45:12,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T15:45:12,031 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:45:12,033 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:45:12,033 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:45:12,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T15:45:12,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42583 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-23T15:45:12,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 
2024-11-23T15:45:12,189 INFO [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 6c1a5b4f1501e6decd41434972e59155 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-23T15:45:12,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/default/TestHBaseWalOnEC/6c1a5b4f1501e6decd41434972e59155/.tmp/cf/6da101529c344748826f91f1400f17e7 is 36, key is row/cf:cq/1732376712023/Put/seqid=0 2024-11-23T15:45:12,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741839_1015 (size=4787) 2024-11-23T15:45:12,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741839_1015 (size=4787) 2024-11-23T15:45:12,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741839_1015 (size=4787) 2024-11-23T15:45:12,224 INFO [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/default/TestHBaseWalOnEC/6c1a5b4f1501e6decd41434972e59155/.tmp/cf/6da101529c344748826f91f1400f17e7 2024-11-23T15:45:12,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/default/TestHBaseWalOnEC/6c1a5b4f1501e6decd41434972e59155/.tmp/cf/6da101529c344748826f91f1400f17e7 as hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/default/TestHBaseWalOnEC/6c1a5b4f1501e6decd41434972e59155/cf/6da101529c344748826f91f1400f17e7 2024-11-23T15:45:12,249 INFO [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/default/TestHBaseWalOnEC/6c1a5b4f1501e6decd41434972e59155/cf/6da101529c344748826f91f1400f17e7, entries=1, sequenceid=5, filesize=4.7 K 2024-11-23T15:45:12,250 INFO [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 6c1a5b4f1501e6decd41434972e59155 in 61ms, sequenceid=5, compaction requested=false 2024-11-23T15:45:12,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 6c1a5b4f1501e6decd41434972e59155: 2024-11-23T15:45:12,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 
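The flush entries above correspond to a single small write followed by an admin-triggered flush: the memstore holds 32 B (one cell at row/cf:cq), FlushTableProcedure pid=7 fans out FlushRegionProcedure pid=8, and the region commits the HFile 6da101529c344748826f91f1400f17e7 (4.7 K) under .../TestHBaseWalOnEC/.../cf/. A client-side sketch of calls that would produce this sequence, again illustrative rather than the actual test code (the cell value is an assumption):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch: one Put to row/cf:cq, then a table flush. The flush is what turns
    // the 32 B memstore entry into the HFile written to the .tmp directory and committed above.
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      admin.flush(tn);   // master: FlushTableProcedure -> FlushRegionProcedure -> memstore flush
    }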
2024-11-23T15:45:12,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b712f9af2c12:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-23T15:45:12,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46737 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-23T15:45:12,256 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-23T15:45:12,256 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 220 msec 2024-11-23T15:45:12,261 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 231 msec 2024-11-23T15:45:12,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T15:45:12,351 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-23T15:45:12,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T15:45:12,355 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T15:45:12,355 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:45:12,355 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:12,355 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:12,355 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T15:45:12,355 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T15:45:12,356 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1124530655, stopped=false 2024-11-23T15:45:12,356 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b712f9af2c12,46737,1732376709795 2024-11-23T15:45:12,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:12,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:12,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:12,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:12,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:12,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, 
quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:12,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T15:45:12,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:45:12,405 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T15:45:12,406 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T15:45:12,406 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:12,406 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:12,406 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:45:12,406 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:12,406 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:12,407 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:45:12,407 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b712f9af2c12,42451,1732376709944' ***** 2024-11-23T15:45:12,407 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b712f9af2c12,42451,1732376709944' ***** 2024-11-23T15:45:12,407 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T15:45:12,407 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.HRegionServer(2210): STOPPED: Exiting; cluster shutdown set and not carrying any regions 2024-11-23T15:45:12,407 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b712f9af2c12,42583,1732376709984' ***** 2024-11-23T15:45:12,407 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T15:45:12,407 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b712f9af2c12,45655,1732376710020' ***** 2024-11-23T15:45:12,407 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T15:45:12,408 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T15:45:12,408 INFO [RS:2;b712f9af2c12:45655 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
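Both call stacks above originate from TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101), which shuts the mini cluster down; everything that follows in the log (region closes, meta flush, WAL archival, ZooKeeper node cleanup) is a consequence of that single call. A sketch of such a teardown is below, with the JUnit annotation and the UTIL field name assumed for illustration:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    // Assumed shape of the teardown the stack traces point at; 'UTIL' is a placeholder name
    // for the HBaseTestingUtil instance that started the mini cluster.
    private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

    @After
    public void tearDown() throws Exception {
      UTIL.shutdownMiniCluster();  // closes the client connection, then stops master and region servers
    }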
2024-11-23T15:45:12,408 INFO [RS:2;b712f9af2c12:45655 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T15:45:12,408 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(959): stopping server b712f9af2c12,45655,1732376710020 2024-11-23T15:45:12,408 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T15:45:12,408 INFO [RS:2;b712f9af2c12:45655 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T15:45:12,408 INFO [RS:1;b712f9af2c12:42583 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T15:45:12,408 INFO [RS:2;b712f9af2c12:45655 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;b712f9af2c12:45655. 2024-11-23T15:45:12,408 INFO [RS:1;b712f9af2c12:42583 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T15:45:12,408 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(3091): Received CLOSE for 6c1a5b4f1501e6decd41434972e59155 2024-11-23T15:45:12,408 DEBUG [RS:2;b712f9af2c12:45655 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:45:12,408 DEBUG [RS:2;b712f9af2c12:45655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:12,408 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T15:45:12,409 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T15:45:12,409 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T15:45:12,409 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-23T15:45:12,409 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T15:45:12,409 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T15:45:12,409 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T15:45:12,409 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(959): stopping server b712f9af2c12,42583,1732376709984 2024-11-23T15:45:12,409 INFO [RS:1;b712f9af2c12:42583 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T15:45:12,409 INFO [RS:0;b712f9af2c12:42451 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T15:45:12,409 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T15:45:12,409 INFO [RS:1;b712f9af2c12:42583 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;b712f9af2c12:42583. 2024-11-23T15:45:12,409 INFO [RS:0;b712f9af2c12:42451 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T15:45:12,409 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.HRegionServer(959): stopping server b712f9af2c12,42451,1732376709944 2024-11-23T15:45:12,409 INFO [RS:0;b712f9af2c12:42451 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T15:45:12,409 DEBUG [RS:1;b712f9af2c12:42583 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:45:12,409 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6c1a5b4f1501e6decd41434972e59155, disabling compactions & flushes 2024-11-23T15:45:12,409 INFO [RS:0;b712f9af2c12:42451 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b712f9af2c12:42451. 2024-11-23T15:45:12,409 DEBUG [RS:1;b712f9af2c12:42583 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:12,409 INFO [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 
2024-11-23T15:45:12,410 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 2024-11-23T15:45:12,410 DEBUG [RS:0;b712f9af2c12:42451 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:45:12,410 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-23T15:45:12,410 DEBUG [RS:0;b712f9af2c12:42451 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:12,410 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. after waiting 0 ms 2024-11-23T15:45:12,410 DEBUG [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(1325): Online Regions={6c1a5b4f1501e6decd41434972e59155=TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155.} 2024-11-23T15:45:12,410 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 2024-11-23T15:45:12,410 DEBUG [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(1351): Waiting on 6c1a5b4f1501e6decd41434972e59155 2024-11-23T15:45:12,410 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.HRegionServer(976): stopping server b712f9af2c12,42451,1732376709944; all regions closed. 
2024-11-23T15:45:12,410 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-23T15:45:12,410 DEBUG [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-23T15:45:12,410 DEBUG [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-23T15:45:12,410 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T15:45:12,411 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T15:45:12,411 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T15:45:12,411 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T15:45:12,411 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T15:45:12,412 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T15:45:12,412 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-23T15:45:12,414 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T15:45:12,415 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T15:45:12,415 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T15:45:12,415 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T15:45:12,425 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/default/TestHBaseWalOnEC/6c1a5b4f1501e6decd41434972e59155/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T15:45:12,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741834_1010 (size=93) 2024-11-23T15:45:12,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741834_1010 (size=93) 2024-11-23T15:45:12,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741834_1010 (size=93) 2024-11-23T15:45:12,428 INFO [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 
2024-11-23T15:45:12,428 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6c1a5b4f1501e6decd41434972e59155: Waiting for close lock at 1732376712409Running coprocessor pre-close hooks at 1732376712409Disabling compacts and flushes for region at 1732376712409Disabling writes for close at 1732376712410 (+1 ms)Writing region close event to WAL at 1732376712411 (+1 ms)Running coprocessor post-close hooks at 1732376712427 (+16 ms)Closed at 1732376712428 (+1 ms) 2024-11-23T15:45:12,428 DEBUG [RS_CLOSE_REGION-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155. 2024-11-23T15:45:12,433 DEBUG [RS:0;b712f9af2c12:42451 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/oldWALs 2024-11-23T15:45:12,433 INFO [RS:0;b712f9af2c12:42451 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b712f9af2c12%2C42451%2C1732376709944:(num 1732376710857) 2024-11-23T15:45:12,433 DEBUG [RS:0;b712f9af2c12:42451 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:45:12,433 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T15:45:12,433 INFO [RS:0;b712f9af2c12:42451 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T15:45:12,433 INFO [RS:0;b712f9af2c12:42451 {}] hbase.ChoreService(370): Chore service for: regionserver/b712f9af2c12:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T15:45:12,433 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T15:45:12,433 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T15:45:12,434 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T15:45:12,434 INFO [RS:0;b712f9af2c12:42451 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T15:45:12,434 INFO [RS:0;b712f9af2c12:42451 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42451 2024-11-23T15:45:12,434 INFO [regionserver/b712f9af2c12:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T15:45:12,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T15:45:12,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b712f9af2c12,42451,1732376709944 2024-11-23T15:45:12,440 INFO [RS:0;b712f9af2c12:42451 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T15:45:12,443 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/.tmp/info/4d2a51e06b4b4000bd7a66b921b05856 is 153, key is TestHBaseWalOnEC,,1732376711374.6c1a5b4f1501e6decd41434972e59155./info:regioninfo/1732376711776/Put/seqid=0 2024-11-23T15:45:12,445 WARN [IPC Server handler 4 on default port 37709 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T15:45:12,445 WARN [IPC Server handler 4 on default port 37709 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T15:45:12,445 WARN [IPC Server handler 4 on default port 37709 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T15:45:12,449 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b712f9af2c12,42451,1732376709944] 2024-11-23T15:45:12,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741840_1016 (size=6637) 2024-11-23T15:45:12,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741840_1016 (size=6637) 2024-11-23T15:45:12,454 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/.tmp/info/4d2a51e06b4b4000bd7a66b921b05856 2024-11-23T15:45:12,457 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b712f9af2c12,42451,1732376709944 
already deleted, retry=false 2024-11-23T15:45:12,457 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b712f9af2c12,42451,1732376709944 expired; onlineServers=2 2024-11-23T15:45:12,479 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/.tmp/ns/3fb35dceb15248138426e3b06334ebf4 is 43, key is default/ns:d/1732376711238/Put/seqid=0 2024-11-23T15:45:12,481 WARN [IPC Server handler 1 on default port 37709 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T15:45:12,481 WARN [IPC Server handler 1 on default port 37709 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T15:45:12,481 WARN [IPC Server handler 1 on default port 37709 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T15:45:12,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741841_1017 (size=5153) 2024-11-23T15:45:12,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741841_1017 (size=5153) 2024-11-23T15:45:12,487 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/.tmp/ns/3fb35dceb15248138426e3b06334ebf4 2024-11-23T15:45:12,503 INFO [regionserver/b712f9af2c12:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T15:45:12,503 INFO [regionserver/b712f9af2c12:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T15:45:12,503 INFO [regionserver/b712f9af2c12:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T15:45:12,517 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/.tmp/table/97592b7b705f41d694c8baf06edc611e is 52, key is TestHBaseWalOnEC/table:state/1732376711796/Put/seqid=0 2024-11-23T15:45:12,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38243 is added to blk_1073741842_1018 (size=5249) 2024-11-23T15:45:12,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741842_1018 (size=5249) 2024-11-23T15:45:12,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741842_1018 (size=5249) 2024-11-23T15:45:12,525 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/.tmp/table/97592b7b705f41d694c8baf06edc611e 2024-11-23T15:45:12,534 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/.tmp/info/4d2a51e06b4b4000bd7a66b921b05856 as hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/info/4d2a51e06b4b4000bd7a66b921b05856 2024-11-23T15:45:12,545 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/info/4d2a51e06b4b4000bd7a66b921b05856, entries=10, sequenceid=11, filesize=6.5 K 2024-11-23T15:45:12,547 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/.tmp/ns/3fb35dceb15248138426e3b06334ebf4 as hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/ns/3fb35dceb15248138426e3b06334ebf4 2024-11-23T15:45:12,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:45:12,549 INFO [RS:0;b712f9af2c12:42451 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T15:45:12,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42451-0x10169a85d4b0001, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:45:12,549 INFO [RS:0;b712f9af2c12:42451 {}] regionserver.HRegionServer(1031): Exiting; stopping=b712f9af2c12,42451,1732376709944; zookeeper connection closed. 
2024-11-23T15:45:12,556 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/ns/3fb35dceb15248138426e3b06334ebf4, entries=2, sequenceid=11, filesize=5.0 K
2024-11-23T15:45:12,558 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/.tmp/table/97592b7b705f41d694c8baf06edc611e as hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/table/97592b7b705f41d694c8baf06edc611e
2024-11-23T15:45:12,562 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5f940e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5f940e
2024-11-23T15:45:12,569 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/table/97592b7b705f41d694c8baf06edc611e, entries=2, sequenceid=11, filesize=5.1 K
2024-11-23T15:45:12,571 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 158ms, sequenceid=11, compaction requested=false
2024-11-23T15:45:12,576 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-11-23T15:45:12,577 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-23T15:45:12,577 INFO [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-23T15:45:12,577 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732376712410Running coprocessor pre-close hooks at 1732376712410Disabling compacts and flushes for region at 1732376712410Disabling writes for close at 1732376712412 (+2 ms)Obtaining lock to block concurrent updates at 1732376712412Preparing flush snapshotting stores in 1588230740 at 1732376712412Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1732376712413 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732376712414 (+1 ms)Flushing 1588230740/info: creating writer at 1732376712414Flushing 1588230740/info: appending metadata at 1732376712443 (+29 ms)Flushing 1588230740/info: closing flushed file at 1732376712443Flushing 1588230740/ns: creating writer at 1732376712463 (+20 ms)Flushing 1588230740/ns: appending metadata at 1732376712479 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732376712479Flushing 1588230740/table: creating writer at 1732376712496 (+17 ms)Flushing 1588230740/table: appending metadata at 1732376712516 (+20 ms)Flushing 1588230740/table: closing flushed file at 1732376712516Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8ff8d34: reopening flushed file at 1732376712532 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@535af553: reopening flushed file at 1732376712545 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74668229: reopening flushed file at 1732376712556 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 158ms, sequenceid=11, compaction requested=false at 1732376712571 (+15 ms)Writing region close event to WAL at 1732376712572 (+1 ms)Running coprocessor post-close hooks at 1732376712577 (+5 ms)Closed at 1732376712577
2024-11-23T15:45:12,577 DEBUG [RS_CLOSE_META-regionserver/b712f9af2c12:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-23T15:45:12,610 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(976): stopping server b712f9af2c12,42583,1732376709984; all regions closed.
2024-11-23T15:45:12,610 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:12,611 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:12,611 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(976): stopping server b712f9af2c12,45655,1732376710020; all regions closed.
2024-11-23T15:45:12,611 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:12,611 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:12,611 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:12,611 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:12,611 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:12,611 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:12,611 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:12,612 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:12,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741833_1009 (size=1298)
2024-11-23T15:45:12,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741836_1012 (size=2751)
2024-11-23T15:45:12,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741836_1012 (size=2751)
2024-11-23T15:45:12,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741833_1009 (size=1298)
2024-11-23T15:45:12,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741833_1009 (size=1298)
2024-11-23T15:45:12,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741836_1012 (size=2751)
2024-11-23T15:45:12,618 DEBUG [RS:1;b712f9af2c12:42583 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/oldWALs
2024-11-23T15:45:12,618 INFO [RS:1;b712f9af2c12:42583 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b712f9af2c12%2C42583%2C1732376709984:(num 1732376710853)
2024-11-23T15:45:12,618 DEBUG [RS:1;b712f9af2c12:42583 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T15:45:12,618 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.LeaseManager(133): Closed leases
2024-11-23T15:45:12,619 INFO [RS:1;b712f9af2c12:42583 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-23T15:45:12,619 INFO [RS:1;b712f9af2c12:42583 {}] hbase.ChoreService(370): Chore service for: regionserver/b712f9af2c12:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-23T15:45:12,619 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-23T15:45:12,619 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-23T15:45:12,619 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-23T15:45:12,619 INFO [RS:1;b712f9af2c12:42583 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-23T15:45:12,619 INFO [RS:1;b712f9af2c12:42583 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42583
2024-11-23T15:45:12,620 INFO [regionserver/b712f9af2c12:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-23T15:45:12,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b712f9af2c12,42583,1732376709984
2024-11-23T15:45:12,649 INFO [RS:1;b712f9af2c12:42583 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-23T15:45:12,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-23T15:45:12,657 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b712f9af2c12,42583,1732376709984]
2024-11-23T15:45:12,665 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b712f9af2c12,42583,1732376709984 already deleted, retry=false
2024-11-23T15:45:12,666 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b712f9af2c12,42583,1732376709984 expired; onlineServers=1
2024-11-23T15:45:12,718 INFO [regionserver/b712f9af2c12:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-11-23T15:45:12,718 INFO [regionserver/b712f9af2c12:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-11-23T15:45:12,757 INFO [RS:1;b712f9af2c12:42583 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-23T15:45:12,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T15:45:12,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42583-0x10169a85d4b0002, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T15:45:12,757 INFO [RS:1;b712f9af2c12:42583 {}] regionserver.HRegionServer(1031): Exiting; stopping=b712f9af2c12,42583,1732376709984; zookeeper connection closed.
2024-11-23T15:45:12,758 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1a205cb1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1a205cb1
2024-11-23T15:45:13,019 DEBUG [RS:2;b712f9af2c12:45655 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/oldWALs
2024-11-23T15:45:13,019 INFO [RS:2;b712f9af2c12:45655 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b712f9af2c12%2C45655%2C1732376710020.meta:.meta(num 1732376711132)
2024-11-23T15:45:13,020 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:13,020 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:13,020 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:13,020 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:13,020 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:13,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741835_1011 (size=93)
2024-11-23T15:45:13,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741835_1011 (size=93)
2024-11-23T15:45:13,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741835_1011 (size=93)
2024-11-23T15:45:13,025 DEBUG [RS:2;b712f9af2c12:45655 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/oldWALs
2024-11-23T15:45:13,025 INFO [RS:2;b712f9af2c12:45655 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b712f9af2c12%2C45655%2C1732376710020:(num 1732376710859)
2024-11-23T15:45:13,025 DEBUG [RS:2;b712f9af2c12:45655 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T15:45:13,025 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.LeaseManager(133): Closed leases
2024-11-23T15:45:13,026 INFO [RS:2;b712f9af2c12:45655 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-23T15:45:13,026 INFO [RS:2;b712f9af2c12:45655 {}] hbase.ChoreService(370): Chore service for: regionserver/b712f9af2c12:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-23T15:45:13,026 INFO [RS:2;b712f9af2c12:45655 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-23T15:45:13,026 INFO [regionserver/b712f9af2c12:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-23T15:45:13,026 INFO [RS:2;b712f9af2c12:45655 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45655
2024-11-23T15:45:13,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-23T15:45:13,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b712f9af2c12,45655,1732376710020
2024-11-23T15:45:13,057 INFO [RS:2;b712f9af2c12:45655 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-23T15:45:13,066 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b712f9af2c12,45655,1732376710020]
2024-11-23T15:45:13,074 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b712f9af2c12,45655,1732376710020 already deleted, retry=false
2024-11-23T15:45:13,074 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b712f9af2c12,45655,1732376710020 expired; onlineServers=0
2024-11-23T15:45:13,074 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b712f9af2c12,46737,1732376709795' *****
2024-11-23T15:45:13,074 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-23T15:45:13,074 INFO [M:0;b712f9af2c12:46737 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-23T15:45:13,074 INFO [M:0;b712f9af2c12:46737 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-23T15:45:13,075 DEBUG [M:0;b712f9af2c12:46737 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-23T15:45:13,075 DEBUG [M:0;b712f9af2c12:46737 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-23T15:45:13,075 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-23T15:45:13,075 DEBUG [master/b712f9af2c12:0:becomeActiveMaster-HFileCleaner.small.0-1732376710469 {}] cleaner.HFileCleaner(306): Exit Thread[master/b712f9af2c12:0:becomeActiveMaster-HFileCleaner.small.0-1732376710469,5,FailOnTimeoutGroup]
2024-11-23T15:45:13,075 DEBUG [master/b712f9af2c12:0:becomeActiveMaster-HFileCleaner.large.0-1732376710469 {}] cleaner.HFileCleaner(306): Exit Thread[master/b712f9af2c12:0:becomeActiveMaster-HFileCleaner.large.0-1732376710469,5,FailOnTimeoutGroup]
2024-11-23T15:45:13,075 INFO [M:0;b712f9af2c12:46737 {}] hbase.ChoreService(370): Chore service for: master/b712f9af2c12:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-23T15:45:13,075 INFO [M:0;b712f9af2c12:46737 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-23T15:45:13,075 DEBUG [M:0;b712f9af2c12:46737 {}] master.HMaster(1795): Stopping service threads
2024-11-23T15:45:13,075 INFO [M:0;b712f9af2c12:46737 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-23T15:45:13,075 INFO [M:0;b712f9af2c12:46737 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-23T15:45:13,075 INFO [M:0;b712f9af2c12:46737 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-23T15:45:13,076 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-23T15:45:13,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-23T15:45:13,082 DEBUG [M:0;b712f9af2c12:46737 {}] zookeeper.ZKUtil(347): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-23T15:45:13,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T15:45:13,082 WARN [M:0;b712f9af2c12:46737 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-23T15:45:13,083 INFO [M:0;b712f9af2c12:46737 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/.lastflushedseqids
2024-11-23T15:45:13,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741843_1019 (size=127)
2024-11-23T15:45:13,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741843_1019 (size=127)
2024-11-23T15:45:13,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741843_1019 (size=127)
2024-11-23T15:45:13,092 INFO [M:0;b712f9af2c12:46737 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-23T15:45:13,092 INFO [M:0;b712f9af2c12:46737 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-23T15:45:13,092 DEBUG [M:0;b712f9af2c12:46737 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-23T15:45:13,092 INFO [M:0;b712f9af2c12:46737 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T15:45:13,092 DEBUG [M:0;b712f9af2c12:46737 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T15:45:13,093 DEBUG [M:0;b712f9af2c12:46737 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms
2024-11-23T15:45:13,093 DEBUG [M:0;b712f9af2c12:46737 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T15:45:13,093 INFO [M:0;b712f9af2c12:46737 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB
2024-11-23T15:45:13,117 DEBUG [M:0;b712f9af2c12:46737 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bcfa9eb76bcb4785a4996b1f8882c820 is 82, key is hbase:meta,,1/info:regioninfo/1732376711171/Put/seqid=0
2024-11-23T15:45:13,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741844_1020 (size=5672)
2024-11-23T15:45:13,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741844_1020 (size=5672)
2024-11-23T15:45:13,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741844_1020 (size=5672)
2024-11-23T15:45:13,133 INFO [M:0;b712f9af2c12:46737 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bcfa9eb76bcb4785a4996b1f8882c820
2024-11-23T15:45:13,157 DEBUG [M:0;b712f9af2c12:46737 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cc7e9f60b71348ccae3b670d827f535b is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732376711805/Put/seqid=0
2024-11-23T15:45:13,166 INFO [RS:2;b712f9af2c12:45655 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-23T15:45:13,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T15:45:13,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45655-0x10169a85d4b0003, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T15:45:13,166 INFO [RS:2;b712f9af2c12:45655 {}] regionserver.HRegionServer(1031): Exiting; stopping=b712f9af2c12,45655,1732376710020; zookeeper connection closed.
2024-11-23T15:45:13,169 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@41baf16c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@41baf16c
2024-11-23T15:45:13,169 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-11-23T15:45:13,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741845_1021 (size=6438)
2024-11-23T15:45:13,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741845_1021 (size=6438)
2024-11-23T15:45:13,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741845_1021 (size=6438)
2024-11-23T15:45:13,173 INFO [M:0;b712f9af2c12:46737 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cc7e9f60b71348ccae3b670d827f535b
2024-11-23T15:45:13,197 DEBUG [M:0;b712f9af2c12:46737 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/11d4ee8e55464e88a61be350dde56acd is 69, key is b712f9af2c12,42451,1732376709944/rs:state/1732376710622/Put/seqid=0
2024-11-23T15:45:13,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741846_1022 (size=5294)
2024-11-23T15:45:13,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741846_1022 (size=5294)
2024-11-23T15:45:13,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741846_1022 (size=5294)
2024-11-23T15:45:13,211 INFO [M:0;b712f9af2c12:46737 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/11d4ee8e55464e88a61be350dde56acd
2024-11-23T15:45:13,219 DEBUG [M:0;b712f9af2c12:46737 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bcfa9eb76bcb4785a4996b1f8882c820 as hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bcfa9eb76bcb4785a4996b1f8882c820
2024-11-23T15:45:13,228 INFO [M:0;b712f9af2c12:46737 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bcfa9eb76bcb4785a4996b1f8882c820, entries=8, sequenceid=72, filesize=5.5 K
2024-11-23T15:45:13,229 DEBUG [M:0;b712f9af2c12:46737 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cc7e9f60b71348ccae3b670d827f535b as hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cc7e9f60b71348ccae3b670d827f535b
2024-11-23T15:45:13,236 INFO [M:0;b712f9af2c12:46737 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cc7e9f60b71348ccae3b670d827f535b, entries=8, sequenceid=72, filesize=6.3 K
2024-11-23T15:45:13,238 DEBUG [M:0;b712f9af2c12:46737 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/11d4ee8e55464e88a61be350dde56acd as hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/11d4ee8e55464e88a61be350dde56acd
2024-11-23T15:45:13,248 INFO [M:0;b712f9af2c12:46737 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37709/user/jenkins/test-data/7dbe2139-61bf-3334-cb98-f07ff23a0452/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/11d4ee8e55464e88a61be350dde56acd, entries=3, sequenceid=72, filesize=5.2 K
2024-11-23T15:45:13,249 INFO [M:0;b712f9af2c12:46737 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=72, compaction requested=false
2024-11-23T15:45:13,250 INFO [M:0;b712f9af2c12:46737 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T15:45:13,250 DEBUG [M:0;b712f9af2c12:46737 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732376713092Disabling compacts and flushes for region at 1732376713092Disabling writes for close at 1732376713093 (+1 ms)Obtaining lock to block concurrent updates at 1732376713093Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732376713093Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1732376713093Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732376713094 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732376713094Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732376713116 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732376713116Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732376713139 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732376713156 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732376713156Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732376713181 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732376713197 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732376713197Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4def67a8: reopening flushed file at 1732376713218 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@308c04ee: reopening flushed file at 1732376713228 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b61b2e2: reopening flushed file at 1732376713236 (+8 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=72, compaction requested=false at 1732376713249 (+13 ms)Writing region close event to WAL at 1732376713250 (+1 ms)Closed at 1732376713250
2024-11-23T15:45:13,251 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:13,251 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:13,251 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:13,251 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:13,251 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T15:45:13,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34053 is added to blk_1073741830_1006 (size=32662)
2024-11-23T15:45:13,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38243 is added to blk_1073741830_1006 (size=32662)
2024-11-23T15:45:13,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33725 is added to blk_1073741830_1006 (size=32662)
2024-11-23T15:45:13,255 INFO [M:0;b712f9af2c12:46737 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-23T15:45:13,255 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-23T15:45:13,255 INFO [M:0;b712f9af2c12:46737 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46737
2024-11-23T15:45:13,255 INFO [M:0;b712f9af2c12:46737 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-23T15:45:13,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T15:45:13,382 INFO [M:0;b712f9af2c12:46737 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-23T15:45:13,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46737-0x10169a85d4b0000, quorum=127.0.0.1:53351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T15:45:13,385 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2347a2a6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-23T15:45:13,386 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c70ad6a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-23T15:45:13,386 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-23T15:45:13,386 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b1a6d57{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-23T15:45:13,386 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78313797{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/hadoop.log.dir/,STOPPED}
2024-11-23T15:45:13,392 WARN [BP-269514553-172.17.0.2-1732376707658 heartbeating to localhost/127.0.0.1:37709 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-23T15:45:13,392 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-23T15:45:13,392 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-23T15:45:13,392 WARN [BP-269514553-172.17.0.2-1732376707658 heartbeating to localhost/127.0.0.1:37709 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-269514553-172.17.0.2-1732376707658 (Datanode Uuid 35f8d034-46dc-453d-abcb-b1541d6ec161) service to localhost/127.0.0.1:37709
2024-11-23T15:45:13,393 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/data/data5/current/BP-269514553-172.17.0.2-1732376707658 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T15:45:13,393 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/data/data6/current/BP-269514553-172.17.0.2-1732376707658 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T15:45:13,393 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-23T15:45:13,401 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@128bbfcc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-23T15:45:13,401 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2dba742a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-23T15:45:13,401 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-23T15:45:13,402 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@435873d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-23T15:45:13,402 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31c5f8b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/hadoop.log.dir/,STOPPED}
2024-11-23T15:45:13,403 WARN [BP-269514553-172.17.0.2-1732376707658 heartbeating to localhost/127.0.0.1:37709 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-23T15:45:13,403 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-23T15:45:13,403 WARN [BP-269514553-172.17.0.2-1732376707658 heartbeating to localhost/127.0.0.1:37709 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-269514553-172.17.0.2-1732376707658 (Datanode Uuid 8a3d0751-f6f1-46ca-b2f7-97c80235db63) service to localhost/127.0.0.1:37709
2024-11-23T15:45:13,403 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-23T15:45:13,404 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/data/data3/current/BP-269514553-172.17.0.2-1732376707658 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T15:45:13,404 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/data/data4/current/BP-269514553-172.17.0.2-1732376707658 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T15:45:13,404 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-23T15:45:13,407 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4f80cdad{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-23T15:45:13,408 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@378a2d39{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-23T15:45:13,408 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-23T15:45:13,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3133e029{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-23T15:45:13,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@208ccfcf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/hadoop.log.dir/,STOPPED}
2024-11-23T15:45:13,409 WARN [BP-269514553-172.17.0.2-1732376707658 heartbeating to localhost/127.0.0.1:37709 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-23T15:45:13,409 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-23T15:45:13,409 WARN [BP-269514553-172.17.0.2-1732376707658 heartbeating to localhost/127.0.0.1:37709 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-269514553-172.17.0.2-1732376707658 (Datanode Uuid 03222bb6-ac22-4c6a-8b78-4a3f05155367) service to localhost/127.0.0.1:37709
2024-11-23T15:45:13,409 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-23T15:45:13,410 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/data/data1/current/BP-269514553-172.17.0.2-1732376707658 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T15:45:13,410 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/cluster_51d8d5c7-f8ba-5e46-1aba-1184a122a7a1/data/data2/current/BP-269514553-172.17.0.2-1732376707658 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T15:45:13,410 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-23T15:45:13,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1e35858{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-23T15:45:13,416 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3dacdd96{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-23T15:45:13,416 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-23T15:45:13,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@180646a5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-23T15:45:13,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c2635ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/55b2995e-c92f-e507-f4f6-4dec8ffd6cfb/hadoop.log.dir/,STOPPED}
2024-11-23T15:45:13,426 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-23T15:45:13,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-23T15:45:13,460 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=146 (was 85) - Thread LEAK? -, OpenFileDescriptor=516 (was 435) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=352 (was 322) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7638 (was 7907)