2024-12-11 20:13:31,413 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-11 20:13:31,428 main DEBUG Took 0.012195 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-11 20:13:31,428 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-11 20:13:31,429 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-11 20:13:31,429 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-11 20:13:31,431 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 20:13:31,442 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-11 20:13:31,464 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,465 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 20:13:31,466 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,467 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 20:13:31,467 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,468 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 20:13:31,469 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,469 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 20:13:31,470 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,471 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 20:13:31,472 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,472 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 20:13:31,473 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,473 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-11 20:13:31,475 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,475 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 20:13:31,476 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,477 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 20:13:31,477 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,478 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 20:13:31,479 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,479 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 20:13:31,480 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,480 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 20:13:31,481 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,481 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-11 20:13:31,484 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 20:13:31,486 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-11 20:13:31,488 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-11 20:13:31,489 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-11 20:13:31,491 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-11 20:13:31,491 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-11 20:13:31,502 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-11 20:13:31,505 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-11 20:13:31,507 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-11 20:13:31,508 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-11 20:13:31,508 main DEBUG createAppenders(={Console}) 2024-12-11 20:13:31,510 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-11 20:13:31,510 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-11 20:13:31,510 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-11 20:13:31,511 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-11 20:13:31,511 main DEBUG OutputStream closed 2024-12-11 20:13:31,512 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-11 20:13:31,512 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-11 20:13:31,512 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-11 20:13:31,577 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-11 20:13:31,580 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-11 20:13:31,580 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-11 20:13:31,581 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-11 20:13:31,582 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-11 20:13:31,582 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-11 20:13:31,582 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-11 20:13:31,582 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-11 20:13:31,583 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-11 20:13:31,583 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-11 20:13:31,583 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-11 20:13:31,583 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-11 20:13:31,584 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-11 20:13:31,584 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-11 20:13:31,584 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-11 20:13:31,584 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-11 20:13:31,585 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-11 20:13:31,586 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-11 20:13:31,588 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-11 20:13:31,588 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-11 20:13:31,588 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-11 20:13:31,589 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-11T20:13:31,601 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-11 20:13:31,604 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-11 20:13:31,604 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
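[Editor's note] The DEBUG entries above record Log4j Core reading log4j2.properties from the hbase-logging tests jar and building the Console appender and per-package loggers. As an orientation aid, here is an approximate reconstruction of that configuration in standard Log4j2 properties syntax; the property key names and the subset of loggers shown are assumptions inferred from the builder output above, not a copy of the actual file.

```properties
# Sketch only: values mirror the DEBUG output above; key names are assumed.
# Status output is enabled in this run (the "main DEBUG" lines above); the exact mechanism is an assumption.
status = debug

# Console appender is the custom HBaseTestAppender writing to stderr (target=SYSTEM_ERR, maxSize=1G).
appender.console.type = HBaseTestAppender
appender.console.target = SYSTEM_ERR
appender.console.maxSize = 1G
appender.console.name = Console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

# Root logger plus a few of the per-package levels reported by LoggerConfig$Builder above (list is partial).
rootLogger = INFO,Console
logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR
logger.directory.name = org.apache.directory
logger.directory.level = WARN
logger.directory.additivity = false
```

The levelAndRefs="INFO,Console" value on the root logger builder above is what the shorthand rootLogger = INFO,Console form produces; the named loggers only set a level, which matches their level="..." / levelAndRefs="null" builder lines.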
2024-12-11T20:13:31,853 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37 2024-12-11T20:13:31,883 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c, deleteOnExit=true 2024-12-11T20:13:31,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/test.cache.data in system properties and HBase conf 2024-12-11T20:13:31,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T20:13:31,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/hadoop.log.dir in system properties and HBase conf 2024-12-11T20:13:31,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T20:13:31,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T20:13:31,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T20:13:32,000 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-11T20:13:32,085 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-11T20:13:32,090 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T20:13:32,090 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T20:13:32,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T20:13:32,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T20:13:32,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T20:13:32,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T20:13:32,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T20:13:32,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T20:13:32,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T20:13:32,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/nfs.dump.dir in system properties and HBase conf 2024-12-11T20:13:32,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/java.io.tmpdir in system properties and HBase conf 2024-12-11T20:13:32,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T20:13:32,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T20:13:32,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T20:13:33,130 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-11T20:13:33,204 INFO [Time-limited test {}] log.Log(170): Logging initialized @2476ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-11T20:13:33,267 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:13:33,339 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T20:13:33,364 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T20:13:33,364 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T20:13:33,366 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T20:13:33,379 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:13:33,383 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b4eb733{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/hadoop.log.dir/,AVAILABLE} 2024-12-11T20:13:33,384 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c6a701e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T20:13:33,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@753cff0b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/java.io.tmpdir/jetty-localhost-44085-hadoop-hdfs-3_4_1-tests_jar-_-any-2991064773041303283/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T20:13:33,564 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78567fa0{HTTP/1.1, (http/1.1)}{localhost:44085} 2024-12-11T20:13:33,564 INFO [Time-limited test {}] server.Server(415): Started @2836ms 2024-12-11T20:13:34,121 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:13:34,127 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T20:13:34,128 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T20:13:34,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T20:13:34,129 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T20:13:34,130 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26fd7980{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/hadoop.log.dir/,AVAILABLE} 2024-12-11T20:13:34,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4802e856{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T20:13:34,226 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@68c42837{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/java.io.tmpdir/jetty-localhost-39301-hadoop-hdfs-3_4_1-tests_jar-_-any-11740974939441468450/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T20:13:34,226 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@736038db{HTTP/1.1, (http/1.1)}{localhost:39301} 2024-12-11T20:13:34,227 INFO [Time-limited test {}] server.Server(415): Started @3499ms 2024-12-11T20:13:34,272 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T20:13:34,386 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:13:34,391 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T20:13:34,392 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T20:13:34,392 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T20:13:34,393 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T20:13:34,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f0232ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/hadoop.log.dir/,AVAILABLE} 2024-12-11T20:13:34,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31fc7e57{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T20:13:34,499 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@51be63ee{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/java.io.tmpdir/jetty-localhost-37691-hadoop-hdfs-3_4_1-tests_jar-_-any-10954438521585337861/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T20:13:34,500 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@106158ef{HTTP/1.1, (http/1.1)}{localhost:37691} 2024-12-11T20:13:34,500 INFO [Time-limited test {}] server.Server(415): Started @3772ms 2024-12-11T20:13:34,502 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T20:13:34,553 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:13:34,560 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T20:13:34,567 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T20:13:34,568 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T20:13:34,568 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T20:13:34,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77df1a06{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/hadoop.log.dir/,AVAILABLE} 2024-12-11T20:13:34,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a4f4410{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T20:13:34,670 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d005cc8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/java.io.tmpdir/jetty-localhost-33159-hadoop-hdfs-3_4_1-tests_jar-_-any-10967292014437668243/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T20:13:34,671 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@492d1201{HTTP/1.1, (http/1.1)}{localhost:33159} 2024-12-11T20:13:34,671 INFO [Time-limited test {}] server.Server(415): Started @3944ms 2024-12-11T20:13:34,673 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
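[Editor's note] With the third datanode's web server started, the embedded HDFS backing this run is in place. For orientation, a minimal sketch of how a test typically requests the topology that is logged further down ("numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1"); the builder and utility calls are assumed to mirror the StartMiniClusterOption fields printed there and are not copied from the actual TestHBaseWalOnEC source.

```java
// Sketch only: assumes StartMiniClusterOption.Builder exposes setters matching the
// fields printed in the "Starting up minicluster with option: ..." log entry below.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster (bound to 38845 in this run)
        .numRegionServers(3)  // three region servers (39105, 41591, 44703 in this run)
        .numDataNodes(3)      // three HDFS datanodes backing the WAL directory
        .numZkServers(1)      // single-node MiniZooKeeperCluster (client port 60082 here)
        .build();
    util.startMiniCluster(option); // starts ZK, DFS (unless already running) and HBase
    try {
      // ... exercise the cluster, e.g. write WAL entries to an erasure-coded directory ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```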
2024-12-11T20:13:35,556 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/data/data3/current/BP-1304524823-172.17.0.2-1733948012709/current, will proceed with Du for space computation calculation, 2024-12-11T20:13:35,556 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/data/data4/current/BP-1304524823-172.17.0.2-1733948012709/current, will proceed with Du for space computation calculation, 2024-12-11T20:13:35,556 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/data/data2/current/BP-1304524823-172.17.0.2-1733948012709/current, will proceed with Du for space computation calculation, 2024-12-11T20:13:35,556 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/data/data1/current/BP-1304524823-172.17.0.2-1733948012709/current, will proceed with Du for space computation calculation, 2024-12-11T20:13:35,588 WARN [Thread-136 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/data/data5/current/BP-1304524823-172.17.0.2-1733948012709/current, will proceed with Du for space computation calculation, 2024-12-11T20:13:35,588 WARN [Thread-137 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/data/data6/current/BP-1304524823-172.17.0.2-1733948012709/current, will proceed with Du for space computation calculation, 2024-12-11T20:13:35,589 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T20:13:35,589 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T20:13:35,612 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T20:13:35,637 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf2af932dc8655aca with lease ID 0x790adac8f0d31155: Processing first storage report for DS-af130960-5ef3-4fcb-8a05-e36235713240 from datanode DatanodeRegistration(127.0.0.1:33711, datanodeUuid=8ad5220c-4cf3-4aa7-9a31-86d49a2e410d, infoPort=42127, infoSecurePort=0, ipcPort=36441, storageInfo=lv=-57;cid=testClusterID;nsid=1819168369;c=1733948012709) 2024-12-11T20:13:35,639 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf2af932dc8655aca with lease ID 0x790adac8f0d31155: from storage DS-af130960-5ef3-4fcb-8a05-e36235713240 node DatanodeRegistration(127.0.0.1:33711, datanodeUuid=8ad5220c-4cf3-4aa7-9a31-86d49a2e410d, infoPort=42127, infoSecurePort=0, ipcPort=36441, storageInfo=lv=-57;cid=testClusterID;nsid=1819168369;c=1733948012709), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T20:13:35,639 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x978ce0201aea099b with lease ID 0x790adac8f0d31156: Processing first storage report for DS-5657614d-cdac-4dfa-ab74-caa716a14611 from datanode DatanodeRegistration(127.0.0.1:32959, datanodeUuid=f0fb4ded-ec4d-4d60-931d-f945b45cd7d6, infoPort=46787, infoSecurePort=0, ipcPort=43097, storageInfo=lv=-57;cid=testClusterID;nsid=1819168369;c=1733948012709) 2024-12-11T20:13:35,639 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x978ce0201aea099b with lease ID 0x790adac8f0d31156: from storage DS-5657614d-cdac-4dfa-ab74-caa716a14611 node DatanodeRegistration(127.0.0.1:32959, datanodeUuid=f0fb4ded-ec4d-4d60-931d-f945b45cd7d6, infoPort=46787, infoSecurePort=0, ipcPort=43097, storageInfo=lv=-57;cid=testClusterID;nsid=1819168369;c=1733948012709), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T20:13:35,639 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4f8abddc1d5bb3de with lease ID 0x790adac8f0d31157: Processing first storage report for DS-24c49129-1211-49ca-801c-19fe9af7aa06 from datanode DatanodeRegistration(127.0.0.1:43873, datanodeUuid=b4bcb877-8c42-4b47-b2b4-c6fbb4af4428, infoPort=40373, infoSecurePort=0, ipcPort=40421, storageInfo=lv=-57;cid=testClusterID;nsid=1819168369;c=1733948012709) 2024-12-11T20:13:35,639 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f8abddc1d5bb3de with lease ID 0x790adac8f0d31157: from storage DS-24c49129-1211-49ca-801c-19fe9af7aa06 node DatanodeRegistration(127.0.0.1:43873, datanodeUuid=b4bcb877-8c42-4b47-b2b4-c6fbb4af4428, infoPort=40373, infoSecurePort=0, ipcPort=40421, storageInfo=lv=-57;cid=testClusterID;nsid=1819168369;c=1733948012709), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T20:13:35,640 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf2af932dc8655aca with lease ID 0x790adac8f0d31155: Processing first storage report for DS-5a086249-31cf-4dfc-885d-d269910b7f45 from datanode DatanodeRegistration(127.0.0.1:33711, datanodeUuid=8ad5220c-4cf3-4aa7-9a31-86d49a2e410d, infoPort=42127, infoSecurePort=0, ipcPort=36441, storageInfo=lv=-57;cid=testClusterID;nsid=1819168369;c=1733948012709) 2024-12-11T20:13:35,640 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xf2af932dc8655aca with lease ID 0x790adac8f0d31155: from storage DS-5a086249-31cf-4dfc-885d-d269910b7f45 node DatanodeRegistration(127.0.0.1:33711, datanodeUuid=8ad5220c-4cf3-4aa7-9a31-86d49a2e410d, infoPort=42127, infoSecurePort=0, ipcPort=36441, storageInfo=lv=-57;cid=testClusterID;nsid=1819168369;c=1733948012709), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T20:13:35,640 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x978ce0201aea099b with lease ID 0x790adac8f0d31156: Processing first storage report for DS-017f8242-8cf9-4d8e-8c8f-1f17330c7aa4 from datanode DatanodeRegistration(127.0.0.1:32959, datanodeUuid=f0fb4ded-ec4d-4d60-931d-f945b45cd7d6, infoPort=46787, infoSecurePort=0, ipcPort=43097, storageInfo=lv=-57;cid=testClusterID;nsid=1819168369;c=1733948012709) 2024-12-11T20:13:35,640 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x978ce0201aea099b with lease ID 0x790adac8f0d31156: from storage DS-017f8242-8cf9-4d8e-8c8f-1f17330c7aa4 node DatanodeRegistration(127.0.0.1:32959, datanodeUuid=f0fb4ded-ec4d-4d60-931d-f945b45cd7d6, infoPort=46787, infoSecurePort=0, ipcPort=43097, storageInfo=lv=-57;cid=testClusterID;nsid=1819168369;c=1733948012709), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T20:13:35,640 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4f8abddc1d5bb3de with lease ID 0x790adac8f0d31157: Processing first storage report for DS-103a7b45-0d3a-4b48-8e0d-70d966a407a3 from datanode DatanodeRegistration(127.0.0.1:43873, datanodeUuid=b4bcb877-8c42-4b47-b2b4-c6fbb4af4428, infoPort=40373, infoSecurePort=0, ipcPort=40421, storageInfo=lv=-57;cid=testClusterID;nsid=1819168369;c=1733948012709) 2024-12-11T20:13:35,640 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f8abddc1d5bb3de with lease ID 0x790adac8f0d31157: from storage DS-103a7b45-0d3a-4b48-8e0d-70d966a407a3 node DatanodeRegistration(127.0.0.1:43873, datanodeUuid=b4bcb877-8c42-4b47-b2b4-c6fbb4af4428, infoPort=40373, infoSecurePort=0, ipcPort=40421, storageInfo=lv=-57;cid=testClusterID;nsid=1819168369;c=1733948012709), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T20:13:35,696 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37 2024-12-11T20:13:35,766 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable
2024-12-11T20:13:35,817 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=157, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=1121, ProcessCount=11, AvailableMemoryMB=5050
2024-12-11T20:13:35,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-11T20:13:35,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-11T20:13:35,930 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/zookeeper_0, clientPort=60082, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-11T20:13:35,943 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60082
2024-12-11T20:13:35,953 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T20:13:35,966 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T20:13:36,072 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-11T20:13:36,073 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-11T20:13:36,135 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1152522736_22 at /127.0.0.1:45904 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:32959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45904 dst: /127.0.0.1:32959
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T20:13:36,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-11T20:13:36,555 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-11T20:13:36,564 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119 with version=8
2024-12-11T20:13:36,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/hbase-staging
2024-12-11T20:13:36,667 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-11T20:13:36,929 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5be53b084ac7:0 server-side Connection retries=45
2024-12-11T20:13:36,937 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T20:13:36,937 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-11T20:13:36,942 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-11T20:13:36,942 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T20:13:36,942 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-11T20:13:37,061 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-11T20:13:37,126 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-11T20:13:37,134 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-11T20:13:37,138 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T20:13:37,164 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 7140 (auto-detected) 2024-12-11T20:13:37,165 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-11T20:13:37,181 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38845 2024-12-11T20:13:37,200 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38845 connecting to ZooKeeper ensemble=127.0.0.1:60082 2024-12-11T20:13:37,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:388450x0, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T20:13:37,318 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38845-0x1001690c3c70000 connected 2024-12-11T20:13:37,390 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:37,395 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:37,407 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:37,412 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119, hbase.cluster.distributed=false 2024-12-11T20:13:37,433 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T20:13:37,438 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38845 2024-12-11T20:13:37,438 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38845 2024-12-11T20:13:37,438 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38845 2024-12-11T20:13:37,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38845 2024-12-11T20:13:37,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38845 2024-12-11T20:13:37,530 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5be53b084ac7:0 server-side Connection retries=45 2024-12-11T20:13:37,532 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:37,532 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:37,533 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T20:13:37,533 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:37,533 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T20:13:37,536 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T20:13:37,538 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T20:13:37,539 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39105 2024-12-11T20:13:37,541 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39105 connecting to ZooKeeper ensemble=127.0.0.1:60082 2024-12-11T20:13:37,542 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:37,549 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:37,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391050x0, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T20:13:37,579 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39105-0x1001690c3c70001 connected 2024-12-11T20:13:37,581 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:37,589 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T20:13:37,602 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T20:13:37,607 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T20:13:37,614 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T20:13:37,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39105 2024-12-11T20:13:37,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=39105 2024-12-11T20:13:37,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39105 2024-12-11T20:13:37,621 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39105 2024-12-11T20:13:37,621 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39105 2024-12-11T20:13:37,637 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5be53b084ac7:0 server-side Connection retries=45 2024-12-11T20:13:37,638 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:37,638 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:37,638 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T20:13:37,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:37,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T20:13:37,639 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T20:13:37,639 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T20:13:37,640 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41591 2024-12-11T20:13:37,642 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41591 connecting to ZooKeeper ensemble=127.0.0.1:60082 2024-12-11T20:13:37,643 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:37,646 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:37,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:415910x0, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T20:13:37,678 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:415910x0, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:37,678 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41591-0x1001690c3c70002 connected 2024-12-11T20:13:37,679 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, 
blockSize=64 KB 2024-12-11T20:13:37,680 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T20:13:37,681 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T20:13:37,683 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T20:13:37,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41591 2024-12-11T20:13:37,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41591 2024-12-11T20:13:37,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41591 2024-12-11T20:13:37,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41591 2024-12-11T20:13:37,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41591 2024-12-11T20:13:37,700 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5be53b084ac7:0 server-side Connection retries=45 2024-12-11T20:13:37,701 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:37,701 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:37,701 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T20:13:37,701 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:37,701 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T20:13:37,701 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T20:13:37,702 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T20:13:37,703 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44703 2024-12-11T20:13:37,704 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44703 connecting to ZooKeeper ensemble=127.0.0.1:60082 2024-12-11T20:13:37,705 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:37,709 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:37,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:447030x0, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T20:13:37,720 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:447030x0, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:37,720 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44703-0x1001690c3c70003 connected 2024-12-11T20:13:37,721 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T20:13:37,722 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T20:13:37,723 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T20:13:37,725 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T20:13:37,726 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44703 2024-12-11T20:13:37,726 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44703 2024-12-11T20:13:37,726 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44703 2024-12-11T20:13:37,727 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44703 2024-12-11T20:13:37,727 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44703 2024-12-11T20:13:37,740 DEBUG [M:0;5be53b084ac7:38845 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5be53b084ac7:38845 2024-12-11T20:13:37,741 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5be53b084ac7,38845,1733948016780 2024-12-11T20:13:37,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:13:37,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:13:37,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-11T20:13:37,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:13:37,756 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5be53b084ac7,38845,1733948016780 2024-12-11T20:13:37,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T20:13:37,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:37,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T20:13:37,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T20:13:37,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:37,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:37,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:37,788 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T20:13:37,789 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5be53b084ac7,38845,1733948016780 from backup master directory 2024-12-11T20:13:37,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:13:37,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5be53b084ac7,38845,1733948016780 2024-12-11T20:13:37,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters
2024-12-11T20:13:37,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-11T20:13:37,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-11T20:13:37,803 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-11T20:13:37,803 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5be53b084ac7,38845,1733948016780
2024-12-11T20:13:37,806 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-11T20:13:37,808 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-11T20:13:37,867 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/hbase.id] with ID: 1681ab22-046c-405d-adf8-ae8657f45af2
2024-12-11T20:13:37,867 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/.tmp/hbase.id
2024-12-11T20:13:37,873 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-11T20:13:37,874 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-11T20:13:37,876 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1152522736_22 at /127.0.0.1:45932 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:32959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45932 dst: /127.0.0.1:32959
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T20:13:37,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_-9223372036854775776_1004 (size=42)
2024-12-11T20:13:37,884 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-11T20:13:37,884 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/.tmp/hbase.id]:[hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/hbase.id]
2024-12-11T20:13:37,941 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T20:13:37,945 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-11T20:13:37,962 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms.
2024-12-11T20:13:37,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-11T20:13:37,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-11T20:13:37,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-11T20:13:37,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-11T20:13:37,992 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-11T20:13:37,992 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-11T20:13:37,994 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1152522736_22 at /127.0.0.1:45942 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:32959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45942 dst: /127.0.0.1:32959
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T20:13:37,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_-9223372036854775760_1006 (size=196)
2024-12-11T20:13:38,000 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-11T20:13:38,014 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-11T20:13:38,015 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-11T20:13:38,020 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-11T20:13:38,045 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-11T20:13:38,045 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-11T20:13:38,048 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1152522736_22 at /127.0.0.1:37326 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:43873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37326 dst: /127.0.0.1:43873
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T20:13:38,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_-9223372036854775744_1008 (size=1189)
2024-12-11T20:13:38,054 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-11T20:13:38,070 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store
2024-12-11T20:13:38,087 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-11T20:13:38,087 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-11T20:13:38,091 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1152522736_22 at /127.0.0.1:39858 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39858 dst: /127.0.0.1:33711
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T20:13:38,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33711 is added to blk_-9223372036854775728_1010 (size=34)
2024-12-11T20:13:38,096 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-11T20:13:38,100 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-11T20:13:38,104 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-11T20:13:38,105 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-11T20:13:38,105 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-11T20:13:38,105 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-11T20:13:38,107 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
after waiting 0 ms
2024-12-11T20:13:38,107 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-11T20:13:38,107 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-11T20:13:38,109 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733948018105Disabling compacts and flushes for region at 1733948018105Disabling writes for close at 1733948018107 (+2 ms)Writing region close event to WAL at 1733948018107Closed at 1733948018107
2024-12-11T20:13:38,111 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/.initializing
2024-12-11T20:13:38,111 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/WALs/5be53b084ac7,38845,1733948016780
2024-12-11T20:13:38,118 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-12-11T20:13:38,132 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C38845%2C1733948016780, suffix=, logDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/WALs/5be53b084ac7,38845,1733948016780, archiveDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/oldWALs, maxLogs=10
2024-12-11T20:13:38,157 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/WALs/5be53b084ac7,38845,1733948016780/5be53b084ac7%2C38845%2C1733948016780.1733948018136, exclude list is [], retry=0
2024-12-11T20:13:38,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T20:13:38,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33711,DS-af130960-5ef3-4fcb-8a05-e36235713240,DISK]
2024-12-11T20:13:38,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43873,DS-24c49129-1211-49ca-801c-19fe9af7aa06,DISK]
2024-12-11T20:13:38,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32959,DS-5657614d-cdac-4dfa-ab74-caa716a14611,DISK]
2024-12-11T20:13:38,180 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-12-11T20:13:38,214 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/WALs/5be53b084ac7,38845,1733948016780/5be53b084ac7%2C38845%2C1733948016780.1733948018136
2024-12-11T20:13:38,215 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42127:42127),(127.0.0.1/127.0.0.1:46787:46787),(127.0.0.1/127.0.0.1:40373:40373)]
2024-12-11T20:13:38,215 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-11T20:13:38,216 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-11T20:13:38,218 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-11T20:13:38,219 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-11T20:13:38,253 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-11T20:13:38,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T20:13:38,279 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:38,281 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:38,281 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:38,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T20:13:38,285 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:38,286 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:13:38,286 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:38,288 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T20:13:38,289 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:38,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:13:38,290 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:38,292 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T20:13:38,292 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:38,293 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:13:38,293 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:38,297 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:38,298 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:38,303 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:38,304 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:38,307 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T20:13:38,310 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:38,316 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:13:38,317 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61620537, jitterRate=-0.08178244531154633}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T20:13:38,322 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733948018230Initializing all the Stores at 1733948018232 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948018233 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733948018233Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733948018234 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733948018234Cleaning up temporary data from old regions at 1733948018304 (+70 ms)Region opened successfully at 1733948018322 (+18 ms) 2024-12-11T20:13:38,323 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T20:13:38,353 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74f7a37e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5be53b084ac7/172.17.0.2:0 2024-12-11T20:13:38,379 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-11T20:13:38,388 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T20:13:38,389 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T20:13:38,391 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T20:13:38,392 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-11T20:13:38,396 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-11T20:13:38,397 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T20:13:38,417 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-11T20:13:38,424 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T20:13:38,470 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-11T20:13:38,475 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T20:13:38,478 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T20:13:38,486 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-11T20:13:38,488 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T20:13:38,492 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T20:13:38,502 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-11T20:13:38,504 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T20:13:38,510 DEBUG [master/5be53b084ac7:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T20:13:38,529 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T20:13:38,535 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T20:13:38,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:38,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:38,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:38,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:38,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:38,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:38,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:38,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:38,549 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5be53b084ac7,38845,1733948016780, sessionid=0x1001690c3c70000, setting cluster-up flag (Was=false) 2024-12-11T20:13:38,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:38,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:38,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-11T20:13:38,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:38,602 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T20:13:38,605 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5be53b084ac7,38845,1733948016780 2024-12-11T20:13:38,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:38,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:38,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:38,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:38,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-11T20:13:38,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33711 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-11T20:13:38,652 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T20:13:38,655 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5be53b084ac7,38845,1733948016780 2024-12-11T20:13:38,662 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-11T20:13:38,731 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(746): ClusterId : 1681ab22-046c-405d-adf8-ae8657f45af2 2024-12-11T20:13:38,731 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.HRegionServer(746): ClusterId : 1681ab22-046c-405d-adf8-ae8657f45af2 2024-12-11T20:13:38,732 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.HRegionServer(746): ClusterId : 1681ab22-046c-405d-adf8-ae8657f45af2 2024-12-11T20:13:38,734 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T20:13:38,734 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T20:13:38,734 DEBUG [RS:1;5be53b084ac7:41591 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T20:13:38,736 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-11T20:13:38,748 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-11T20:13:38,757 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-11T20:13:38,763 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T20:13:38,763 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T20:13:38,763 DEBUG [RS:1;5be53b084ac7:41591 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T20:13:38,763 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T20:13:38,763 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T20:13:38,763 DEBUG [RS:1;5be53b084ac7:41591 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T20:13:38,765 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5be53b084ac7,38845,1733948016780 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T20:13:38,778 DEBUG [RS:1;5be53b084ac7:41591 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T20:13:38,778 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T20:13:38,778 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T20:13:38,779 DEBUG [RS:1;5be53b084ac7:41591 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f390c41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5be53b084ac7/172.17.0.2:0 2024-12-11T20:13:38,779 DEBUG [RS:0;5be53b084ac7:39105 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57459c80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5be53b084ac7/172.17.0.2:0 2024-12-11T20:13:38,779 DEBUG 
[RS:2;5be53b084ac7:44703 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d3540be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5be53b084ac7/172.17.0.2:0 2024-12-11T20:13:38,780 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5be53b084ac7:0, corePoolSize=5, maxPoolSize=5 2024-12-11T20:13:38,780 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5be53b084ac7:0, corePoolSize=5, maxPoolSize=5 2024-12-11T20:13:38,781 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5be53b084ac7:0, corePoolSize=5, maxPoolSize=5 2024-12-11T20:13:38,781 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5be53b084ac7:0, corePoolSize=5, maxPoolSize=5 2024-12-11T20:13:38,781 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5be53b084ac7:0, corePoolSize=10, maxPoolSize=10 2024-12-11T20:13:38,781 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:38,781 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5be53b084ac7:0, corePoolSize=2, maxPoolSize=2 2024-12-11T20:13:38,782 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:38,792 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733948048791 2024-12-11T20:13:38,793 DEBUG [RS:1;5be53b084ac7:41591 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;5be53b084ac7:41591 2024-12-11T20:13:38,793 DEBUG [RS:2;5be53b084ac7:44703 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;5be53b084ac7:44703 2024-12-11T20:13:38,794 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T20:13:38,795 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T20:13:38,796 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T20:13:38,797 DEBUG [RS:0;5be53b084ac7:39105 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5be53b084ac7:39105 2024-12-11T20:13:38,797 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-11T20:13:38,798 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 
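Each "Starting executor service" line above describes a fixed pool (corePoolSize equal to maxPoolSize). As a rough analogy in plain java.util.concurrent terms, not HBase's own ExecutorService class, such a pool looks like the sketch below; the thread-name prefix is assumed for illustration.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class FixedWorkerPool {
    public static void main(String[] args) {
        AtomicInteger seq = new AtomicInteger();
        ThreadFactory namedDaemon = r -> {
            Thread t = new Thread(r, "MASTER_OPEN_REGION-" + seq.incrementAndGet()); // assumed name prefix
            t.setDaemon(true);
            return t;
        };
        // corePoolSize == maxPoolSize == 5, so the pool keeps a fixed set of workers and
        // queues any extra submissions, mirroring the corePoolSize=5, maxPoolSize=5 lines above.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            5, 5, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), namedDaemon);
        pool.execute(() ->
            System.out.println("task running on " + Thread.currentThread().getName()));
        pool.shutdown();
    }
}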
2024-12-11T20:13:38,798 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T20:13:38,798 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T20:13:38,798 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T20:13:38,798 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T20:13:38,798 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T20:13:38,798 DEBUG [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T20:13:38,798 DEBUG [RS:0;5be53b084ac7:39105 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T20:13:38,798 DEBUG [RS:1;5be53b084ac7:41591 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T20:13:38,800 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T20:13:38,800 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T20:13:38,800 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T20:13:38,801 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T20:13:38,801 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.HRegionServer(2659): reportForDuty to master=5be53b084ac7,38845,1733948016780 with port=41591, startcode=1733948017637 2024-12-11T20:13:38,801 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.HRegionServer(2659): reportForDuty to master=5be53b084ac7,38845,1733948016780 with port=39105, startcode=1733948017497 2024-12-11T20:13:38,801 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(2659): reportForDuty to master=5be53b084ac7,38845,1733948016780 with port=44703, startcode=1733948017700 2024-12-11T20:13:38,803 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:38,803 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T20:13:38,803 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:38,807 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T20:13:38,808 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T20:13:38,808 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T20:13:38,814 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T20:13:38,814 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T20:13:38,815 DEBUG [RS:0;5be53b084ac7:39105 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T20:13:38,815 DEBUG [RS:2;5be53b084ac7:44703 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T20:13:38,815 DEBUG [RS:1;5be53b084ac7:41591 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T20:13:38,817 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:38,817 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:38,819 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.large.0-1733948018816,5,FailOnTimeoutGroup] 2024-12-11T20:13:38,823 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.small.0-1733948018819,5,FailOnTimeoutGroup] 2024-12-11T20:13:38,823 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
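The hbase:meta descriptor printed above spells out each column family's attributes (VERSIONS, BLOOMFILTER, ROW_INDEX_V1 encoding, block size, and so on). For comparison, a family with similar settings could be declared through the public HBase client API roughly as below; the table name is hypothetical, hbase:meta itself is created internally by InitMetaProcedure rather than by client code, and hbase-client is assumed on the classpath.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptor {
    public static void main(String[] args) {
        // Settings modeled on the 'info' family above: 3 versions, in-memory,
        // ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();

        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo")) // hypothetical table name
            .setColumnFamily(info)
            .build();

        System.out.println(table); // prints an attribute listing similar to the log above
    }
}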
2024-12-11T20:13:38,823 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T20:13:38,825 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:38,825 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:38,827 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1152522736_22 at /127.0.0.1:39884 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:33711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39884 dst: /127.0.0.1:33711 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T20:13:38,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33711 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-11T20:13:38,835 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
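The ChoreService lines above register periodic chores by name and period, for example LogsCleaner at period=600000 ms and HFileCleaner at the same period. A rough JDK-level analogy, not the ChoreService API itself, is a scheduled executor running a placeholder task at that period:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class LogsCleanerChoreSketch {
    public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
        // period=600000 ms matches the LogsCleaner chore above; the task body is a placeholder.
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("scanning old WALs for deletable files (placeholder)"),
            0, 600_000, TimeUnit.MILLISECONDS);
    }
}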
2024-12-11T20:13:38,837 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-11T20:13:38,838 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119 2024-12-11T20:13:38,856 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:38,856 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52745, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T20:13:38,856 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
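The repeated RS-3-2-1024k warnings above arise because a striped write needs data plus parity datanodes (3 + 2 = 5) while this mini cluster runs only a few; the log itself points at 'hdfs ec -verifyClusterSetup'. A hedged sketch of the equivalent check through the HDFS Java API follows; the NameNode URI and the path are assumptions (the run above used an ephemeral localhost port).

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcCapacityCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed NameNode address for illustration only.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(new Path("/"));
            int liveDataNodes = dfs.getDataNodeStats().length;
            if (policy == null) {
                System.out.println("no erasure coding policy on /; plain replication is used");
            } else {
                int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
                System.out.printf("policy %s wants %d datanodes, cluster reports %d live%n",
                    policy.getName(), needed, liveDataNodes);
            }
        }
    }
}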
2024-12-11T20:13:38,859 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50367, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T20:13:38,860 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60571, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T20:13:38,859 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1152522736_22 at /127.0.0.1:45976 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:32959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45976 dst: /127.0.0.1:32959 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T20:13:38,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-11T20:13:38,864 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38845 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5be53b084ac7,39105,1733948017497 2024-12-11T20:13:38,865 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T20:13:38,867 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:13:38,867 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38845 {}] master.ServerManager(517): Registering regionserver=5be53b084ac7,39105,1733948017497 2024-12-11T20:13:38,880 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T20:13:38,882 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38845 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5be53b084ac7,41591,1733948017637 2024-12-11T20:13:38,882 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38845 {}] master.ServerManager(517): Registering regionserver=5be53b084ac7,41591,1733948017637 2024-12-11T20:13:38,883 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T20:13:38,883 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:38,886 DEBUG [RS:0;5be53b084ac7:39105 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119 2024-12-11T20:13:38,887 DEBUG [RS:0;5be53b084ac7:39105 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39591 2024-12-11T20:13:38,887 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:38,887 DEBUG [RS:0;5be53b084ac7:39105 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T20:13:38,887 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T20:13:38,888 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38845 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5be53b084ac7,44703,1733948017700 2024-12-11T20:13:38,888 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38845 {}] master.ServerManager(517): Registering 
regionserver=5be53b084ac7,44703,1733948017700 2024-12-11T20:13:38,888 DEBUG [RS:1;5be53b084ac7:41591 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119 2024-12-11T20:13:38,888 DEBUG [RS:1;5be53b084ac7:41591 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39591 2024-12-11T20:13:38,889 DEBUG [RS:1;5be53b084ac7:41591 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T20:13:38,891 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T20:13:38,892 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:38,892 DEBUG [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119 2024-12-11T20:13:38,892 DEBUG [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39591 2024-12-11T20:13:38,892 DEBUG [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T20:13:38,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:38,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T20:13:38,896 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T20:13:38,896 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:38,897 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:38,897 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T20:13:38,901 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T20:13:38,901 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:38,902 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:38,902 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T20:13:38,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740 2024-12-11T20:13:38,905 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740 2024-12-11T20:13:38,910 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T20:13:38,910 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T20:13:38,912 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
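The FlushLargeStoresPolicy line above derives its per-family lower bound as the memstore flush size divided by the number of families. With the usual 128 MB flush size and the four meta families (info, ns, rep_barrier, table), that works out to 32 MB, i.e. the flushSizeLowerBound=33554432 printed a few lines below; the constant names in this small worked example are descriptive, not HBase identifiers.

public class FlushLowerBound {
    public static void main(String[] args) {
        long memstoreFlushSizeBytes = 128L * 1024 * 1024; // hbase.hregion.memstore.flush.size default
        int columnFamilies = 4;                           // info, ns, rep_barrier, table
        long lowerBound = memstoreFlushSizeBytes / columnFamilies;
        System.out.println(lowerBound);                   // 33554432 bytes, i.e. 32 MB
    }
}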
2024-12-11T20:13:38,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T20:13:38,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T20:13:38,937 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:13:38,939 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69353734, jitterRate=0.03345116972923279}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T20:13:38,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733948018867Initializing all the Stores at 1733948018872 (+5 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948018872Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948018880 (+8 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733948018880Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948018880Cleaning up temporary data from old regions at 1733948018910 (+30 ms)Region opened successfully at 1733948018942 (+32 ms) 2024-12-11T20:13:38,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T20:13:38,942 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T20:13:38,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T20:13:38,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T20:13:38,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T20:13:38,952 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 
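The split-policy line above prints desiredMaxFileSize=69353734 alongside jitterRate=0.03345116972923279, which is consistent with a 64 MB configured max file size plus that jitter; the 64 MB base is inferred from the numbers rather than stated in the log.

public class SplitSizeJitter {
    public static void main(String[] args) {
        long configuredMaxFileSize = 64L * 1024 * 1024;  // 67108864; inferred test setting, not logged directly
        double jitterRate = 0.03345116972923279;         // jitterRate printed in the log
        long desired = configuredMaxFileSize + (long) (configuredMaxFileSize * jitterRate);
        System.out.println(desired);                     // 69353734, the logged desiredMaxFileSize
    }
}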
2024-12-11T20:13:38,952 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733948018942Disabling compacts and flushes for region at 1733948018942Disabling writes for close at 1733948018943 (+1 ms)Writing region close event to WAL at 1733948018951 (+8 ms)Closed at 1733948018952 (+1 ms) 2024-12-11T20:13:38,956 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T20:13:38,956 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-11T20:13:38,966 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T20:13:38,971 DEBUG [RS:0;5be53b084ac7:39105 {}] zookeeper.ZKUtil(111): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5be53b084ac7,39105,1733948017497 2024-12-11T20:13:38,971 DEBUG [RS:2;5be53b084ac7:44703 {}] zookeeper.ZKUtil(111): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5be53b084ac7,44703,1733948017700 2024-12-11T20:13:38,971 WARN [RS:0;5be53b084ac7:39105 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T20:13:38,971 WARN [RS:2;5be53b084ac7:44703 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T20:13:38,971 DEBUG [RS:1;5be53b084ac7:41591 {}] zookeeper.ZKUtil(111): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5be53b084ac7,41591,1733948017637 2024-12-11T20:13:38,971 WARN [RS:1;5be53b084ac7:41591 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-11T20:13:38,971 INFO [RS:0;5be53b084ac7:39105 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T20:13:38,971 INFO [RS:2;5be53b084ac7:44703 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T20:13:38,971 INFO [RS:1;5be53b084ac7:41591 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T20:13:38,971 DEBUG [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,44703,1733948017700 2024-12-11T20:13:38,971 DEBUG [RS:0;5be53b084ac7:39105 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,39105,1733948017497 2024-12-11T20:13:38,973 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5be53b084ac7,41591,1733948017637] 2024-12-11T20:13:38,973 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5be53b084ac7,39105,1733948017497] 2024-12-11T20:13:38,971 DEBUG [RS:1;5be53b084ac7:41591 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,41591,1733948017637 2024-12-11T20:13:38,973 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5be53b084ac7,44703,1733948017700] 2024-12-11T20:13:38,977 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T20:13:38,981 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T20:13:39,008 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T20:13:39,008 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T20:13:39,011 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T20:13:39,024 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T20:13:39,024 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T20:13:39,024 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T20:13:39,030 INFO [RS:0;5be53b084ac7:39105 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T20:13:39,030 INFO 
[RS:2;5be53b084ac7:44703 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T20:13:39,030 INFO [RS:1;5be53b084ac7:41591 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T20:13:39,030 INFO [RS:0;5be53b084ac7:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,030 INFO [RS:2;5be53b084ac7:44703 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,030 INFO [RS:1;5be53b084ac7:41591 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,031 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T20:13:39,039 INFO [RS:0;5be53b084ac7:39105 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T20:13:39,042 INFO [RS:0;5be53b084ac7:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,042 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,042 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,042 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,042 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,043 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,043 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5be53b084ac7:0, corePoolSize=2, maxPoolSize=2 2024-12-11T20:13:39,043 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T20:13:39,043 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,043 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,043 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,043 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,043 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,043 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T20:13:39,044 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,044 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:13:39,044 DEBUG [RS:0;5be53b084ac7:39105 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:13:39,044 INFO [RS:2;5be53b084ac7:44703 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T20:13:39,045 INFO [RS:2;5be53b084ac7:44703 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,045 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,045 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,045 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,045 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,046 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,046 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5be53b084ac7:0, corePoolSize=2, maxPoolSize=2 2024-12-11T20:13:39,046 INFO [RS:1;5be53b084ac7:41591 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T20:13:39,046 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,046 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,046 INFO [RS:1;5be53b084ac7:41591 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
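The MemStoreFlusher lines a little above report globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. Those figures are consistent with the usual defaults of 40% of heap for the global limit and 95% of the limit for the low-water mark, which would put this test JVM's heap around 2200 MB; the heap size itself is inferred, not logged.

public class GlobalMemStoreLimits {
    public static void main(String[] args) {
        // 2200 MB heap is an inference: 40% of it gives the 880 M limit in the log,
        // and 95% of that limit gives the 836 M low-water mark.
        long heapMb = 2200;
        long globalLimitMb = Math.round(heapMb * 0.4);     // 880
        long lowMarkMb = Math.round(globalLimitMb * 0.95); // 836
        System.out.println(globalLimitMb + " M / " + lowMarkMb + " M");
    }
}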
2024-12-11T20:13:39,046 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,046 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,046 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,046 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,046 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,046 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:13:39,047 DEBUG [RS:2;5be53b084ac7:44703 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:13:39,047 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,047 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,047 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,047 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,047 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5be53b084ac7:0, corePoolSize=2, maxPoolSize=2 2024-12-11T20:13:39,047 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,047 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,047 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,047 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,047 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,048 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service 
name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:39,048 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:13:39,048 DEBUG [RS:1;5be53b084ac7:41591 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:13:39,054 INFO [RS:0;5be53b084ac7:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,054 INFO [RS:0;5be53b084ac7:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,054 INFO [RS:0;5be53b084ac7:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,054 INFO [RS:0;5be53b084ac7:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,055 INFO [RS:0;5be53b084ac7:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,055 INFO [RS:0;5be53b084ac7:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,39105,1733948017497-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T20:13:39,056 INFO [RS:1;5be53b084ac7:41591 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,056 INFO [RS:1;5be53b084ac7:41591 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,056 INFO [RS:1;5be53b084ac7:41591 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,057 INFO [RS:1;5be53b084ac7:41591 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,057 INFO [RS:1;5be53b084ac7:41591 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,057 INFO [RS:1;5be53b084ac7:41591 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,41591,1733948017637-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T20:13:39,059 INFO [RS:2;5be53b084ac7:44703 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,059 INFO [RS:2;5be53b084ac7:44703 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,059 INFO [RS:2;5be53b084ac7:44703 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,059 INFO [RS:2;5be53b084ac7:44703 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,059 INFO [RS:2;5be53b084ac7:44703 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-12-11T20:13:39,060 INFO [RS:2;5be53b084ac7:44703 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,44703,1733948017700-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T20:13:39,075 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T20:13:39,077 INFO [RS:0;5be53b084ac7:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,39105,1733948017497-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,077 INFO [RS:0;5be53b084ac7:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,078 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.Replication(171): 5be53b084ac7,39105,1733948017497 started 2024-12-11T20:13:39,084 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T20:13:39,084 INFO [RS:2;5be53b084ac7:44703 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,44703,1733948017700-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,084 INFO [RS:2;5be53b084ac7:44703 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,084 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.Replication(171): 5be53b084ac7,44703,1733948017700 started 2024-12-11T20:13:39,087 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T20:13:39,087 INFO [RS:1;5be53b084ac7:41591 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,41591,1733948017637-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,088 INFO [RS:1;5be53b084ac7:41591 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,088 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.Replication(171): 5be53b084ac7,41591,1733948017637 started 2024-12-11T20:13:39,097 INFO [RS:0;5be53b084ac7:39105 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T20:13:39,097 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.HRegionServer(1482): Serving as 5be53b084ac7,39105,1733948017497, RpcServer on 5be53b084ac7/172.17.0.2:39105, sessionid=0x1001690c3c70001 2024-12-11T20:13:39,098 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T20:13:39,098 DEBUG [RS:0;5be53b084ac7:39105 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5be53b084ac7,39105,1733948017497 2024-12-11T20:13:39,099 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,39105,1733948017497' 2024-12-11T20:13:39,099 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T20:13:39,100 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T20:13:39,102 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T20:13:39,102 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T20:13:39,102 DEBUG [RS:0;5be53b084ac7:39105 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5be53b084ac7,39105,1733948017497 2024-12-11T20:13:39,102 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,39105,1733948017497' 2024-12-11T20:13:39,102 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T20:13:39,103 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T20:13:39,104 DEBUG [RS:0;5be53b084ac7:39105 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T20:13:39,104 INFO [RS:0;5be53b084ac7:39105 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T20:13:39,104 INFO [RS:0;5be53b084ac7:39105 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T20:13:39,107 INFO [RS:2;5be53b084ac7:44703 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
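Both quota managers on RS:0 report "Quota support disabled" because the cluster runs with quotas off. A hedged sketch of the switch that would turn them on in a similar setup, assuming the standard hbase.quota.enabled property rather than anything this test actually sets:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // RegionServerRpcQuotaManager / RegionServerSpaceQuotaManager only start their
        // machinery when this flag is true; the run above uses the default (false).
        conf.setBoolean("hbase.quota.enabled", true);
      }
    }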
2024-12-11T20:13:39,107 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(1482): Serving as 5be53b084ac7,44703,1733948017700, RpcServer on 5be53b084ac7/172.17.0.2:44703, sessionid=0x1001690c3c70003 2024-12-11T20:13:39,107 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T20:13:39,108 DEBUG [RS:2;5be53b084ac7:44703 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5be53b084ac7,44703,1733948017700 2024-12-11T20:13:39,108 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,44703,1733948017700' 2024-12-11T20:13:39,108 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T20:13:39,108 INFO [RS:1;5be53b084ac7:41591 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,108 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.HRegionServer(1482): Serving as 5be53b084ac7,41591,1733948017637, RpcServer on 5be53b084ac7/172.17.0.2:41591, sessionid=0x1001690c3c70002 2024-12-11T20:13:39,108 DEBUG [RS:1;5be53b084ac7:41591 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T20:13:39,108 DEBUG [RS:1;5be53b084ac7:41591 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5be53b084ac7,41591,1733948017637 2024-12-11T20:13:39,108 DEBUG [RS:1;5be53b084ac7:41591 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,41591,1733948017637' 2024-12-11T20:13:39,108 DEBUG [RS:1;5be53b084ac7:41591 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T20:13:39,108 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T20:13:39,109 DEBUG [RS:1;5be53b084ac7:41591 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T20:13:39,109 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T20:13:39,109 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T20:13:39,109 DEBUG [RS:1;5be53b084ac7:41591 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T20:13:39,109 DEBUG [RS:1;5be53b084ac7:41591 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T20:13:39,109 DEBUG [RS:2;5be53b084ac7:44703 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5be53b084ac7,44703,1733948017700 2024-12-11T20:13:39,109 DEBUG [RS:1;5be53b084ac7:41591 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5be53b084ac7,41591,1733948017637 2024-12-11T20:13:39,109 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,44703,1733948017700' 2024-12-11T20:13:39,109 DEBUG [RS:1;5be53b084ac7:41591 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,41591,1733948017637' 2024-12-11T20:13:39,109 DEBUG [RS:1;5be53b084ac7:41591 {}] procedure.ZKProcedureMemberRpcs(134): 
Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T20:13:39,109 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T20:13:39,110 DEBUG [RS:1;5be53b084ac7:41591 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T20:13:39,110 DEBUG [RS:1;5be53b084ac7:41591 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T20:13:39,110 INFO [RS:1;5be53b084ac7:41591 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T20:13:39,110 INFO [RS:1;5be53b084ac7:41591 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T20:13:39,111 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T20:13:39,111 DEBUG [RS:2;5be53b084ac7:44703 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T20:13:39,111 INFO [RS:2;5be53b084ac7:44703 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T20:13:39,111 INFO [RS:2;5be53b084ac7:44703 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T20:13:39,132 WARN [5be53b084ac7:38845 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-11T20:13:39,209 INFO [RS:0;5be53b084ac7:39105 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T20:13:39,211 INFO [RS:1;5be53b084ac7:41591 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T20:13:39,212 INFO [RS:0;5be53b084ac7:39105 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C39105%2C1733948017497, suffix=, logDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,39105,1733948017497, archiveDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/oldWALs, maxLogs=32 2024-12-11T20:13:39,212 INFO [RS:2;5be53b084ac7:44703 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T20:13:39,214 INFO [RS:1;5be53b084ac7:41591 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C41591%2C1733948017637, suffix=, logDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,41591,1733948017637, archiveDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/oldWALs, maxLogs=32 2024-12-11T20:13:39,216 INFO [RS:2;5be53b084ac7:44703 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C44703%2C1733948017700, suffix=, logDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,44703,1733948017700, archiveDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/oldWALs, maxLogs=32 2024-12-11T20:13:39,229 DEBUG [RS:0;5be53b084ac7:39105 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,39105,1733948017497/5be53b084ac7%2C39105%2C1733948017497.1733948019214, 
exclude list is [], retry=0 2024-12-11T20:13:39,229 DEBUG [RS:1;5be53b084ac7:41591 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,41591,1733948017637/5be53b084ac7%2C41591%2C1733948017637.1733948019216, exclude list is [], retry=0 2024-12-11T20:13:39,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33711,DS-af130960-5ef3-4fcb-8a05-e36235713240,DISK] 2024-12-11T20:13:39,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33711,DS-af130960-5ef3-4fcb-8a05-e36235713240,DISK] 2024-12-11T20:13:39,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32959,DS-5657614d-cdac-4dfa-ab74-caa716a14611,DISK] 2024-12-11T20:13:39,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43873,DS-24c49129-1211-49ca-801c-19fe9af7aa06,DISK] 2024-12-11T20:13:39,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32959,DS-5657614d-cdac-4dfa-ab74-caa716a14611,DISK] 2024-12-11T20:13:39,235 DEBUG [RS:2;5be53b084ac7:44703 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,44703,1733948017700/5be53b084ac7%2C44703%2C1733948017700.1733948019218, exclude list is [], retry=0 2024-12-11T20:13:39,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43873,DS-24c49129-1211-49ca-801c-19fe9af7aa06,DISK] 2024-12-11T20:13:39,265 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43873,DS-24c49129-1211-49ca-801c-19fe9af7aa06,DISK] 2024-12-11T20:13:39,265 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33711,DS-af130960-5ef3-4fcb-8a05-e36235713240,DISK] 2024-12-11T20:13:39,265 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:32959,DS-5657614d-cdac-4dfa-ab74-caa716a14611,DISK] 2024-12-11T20:13:39,267 INFO [RS:0;5be53b084ac7:39105 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,39105,1733948017497/5be53b084ac7%2C39105%2C1733948017497.1733948019214 2024-12-11T20:13:39,267 DEBUG [RS:0;5be53b084ac7:39105 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46787:46787),(127.0.0.1/127.0.0.1:42127:42127),(127.0.0.1/127.0.0.1:40373:40373)] 2024-12-11T20:13:39,271 INFO [RS:1;5be53b084ac7:41591 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,41591,1733948017637/5be53b084ac7%2C41591%2C1733948017637.1733948019216 2024-12-11T20:13:39,273 DEBUG [RS:1;5be53b084ac7:41591 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42127:42127),(127.0.0.1/127.0.0.1:40373:40373),(127.0.0.1/127.0.0.1:46787:46787)] 2024-12-11T20:13:39,275 INFO [RS:2;5be53b084ac7:44703 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,44703,1733948017700/5be53b084ac7%2C44703%2C1733948017700.1733948019218 2024-12-11T20:13:39,276 DEBUG [RS:2;5be53b084ac7:44703 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42127:42127),(127.0.0.1/127.0.0.1:40373:40373),(127.0.0.1/127.0.0.1:46787:46787)] 2024-12-11T20:13:39,385 DEBUG [5be53b084ac7:38845 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-11T20:13:39,393 DEBUG [5be53b084ac7:38845 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:13:39,399 DEBUG [5be53b084ac7:38845 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:13:39,399 DEBUG [5be53b084ac7:38845 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:13:39,399 DEBUG [5be53b084ac7:38845 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:13:39,399 DEBUG [5be53b084ac7:38845 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:13:39,399 DEBUG [5be53b084ac7:38845 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:13:39,399 DEBUG [5be53b084ac7:38845 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:13:39,399 INFO [5be53b084ac7:38845 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:13:39,399 INFO [5be53b084ac7:38845 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:13:39,399 INFO [5be53b084ac7:38845 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:13:39,399 DEBUG [5be53b084ac7:38845 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:13:39,406 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5be53b084ac7,44703,1733948017700 2024-12-11T20:13:39,412 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5be53b084ac7,44703,1733948017700, state=OPENING 2024-12-11T20:13:39,444 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T20:13:39,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:39,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:39,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:39,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:39,459 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:39,458 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:39,459 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:39,460 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:39,463 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T20:13:39,465 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5be53b084ac7,44703,1733948017700}] 2024-12-11T20:13:39,638 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T20:13:39,640 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41521, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:13:39,651 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-11T20:13:39,651 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T20:13:39,652 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-11T20:13:39,655 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C44703%2C1733948017700.meta, suffix=.meta, logDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,44703,1733948017700, archiveDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/oldWALs, maxLogs=32 
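The WAL lines above (WALProvider of type AsyncFSWALProvider, blocksize=256 MB, rollsize=128 MB, maxLogs=32) reflect region-server configuration rather than anything table-specific. A rough sketch of the knobs involved, assuming the property names hbase.wal.provider, hbase.regionserver.maxlogs and hbase.regionserver.logroll.multiplier; check them against the shipped hbase-default.xml before relying on them:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "asyncfs" selects AsyncFSWALProvider, the provider instantiated in the log above.
        conf.set("hbase.wal.provider", "asyncfs");
        // Keep at most 32 WAL files per server before forcing flushes (maxLogs=32 above).
        conf.setInt("hbase.regionserver.maxlogs", 32);
        // Roll at 0.5 * blocksize, which is how a 256 MB block gives the 128 MB rollsize above.
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
      }
    }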
2024-12-11T20:13:39,671 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,44703,1733948017700/5be53b084ac7%2C44703%2C1733948017700.meta.1733948019656.meta, exclude list is [], retry=0 2024-12-11T20:13:39,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43873,DS-24c49129-1211-49ca-801c-19fe9af7aa06,DISK] 2024-12-11T20:13:39,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32959,DS-5657614d-cdac-4dfa-ab74-caa716a14611,DISK] 2024-12-11T20:13:39,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33711,DS-af130960-5ef3-4fcb-8a05-e36235713240,DISK] 2024-12-11T20:13:39,678 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/WALs/5be53b084ac7,44703,1733948017700/5be53b084ac7%2C44703%2C1733948017700.meta.1733948019656.meta 2024-12-11T20:13:39,679 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46787:46787),(127.0.0.1/127.0.0.1:40373:40373),(127.0.0.1/127.0.0.1:42127:42127)] 2024-12-11T20:13:39,679 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T20:13:39,681 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T20:13:39,683 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T20:13:39,688 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-11T20:13:39,692 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T20:13:39,692 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:13:39,693 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-11T20:13:39,693 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-11T20:13:39,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T20:13:39,698 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T20:13:39,698 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:39,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:39,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T20:13:39,701 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T20:13:39,701 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:39,702 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:39,702 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T20:13:39,704 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T20:13:39,704 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:39,706 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:39,706 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T20:13:39,708 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T20:13:39,708 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:39,709 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
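The CompactionConfiguration dumps above (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0) map onto a handful of hbase.hstore.compaction.* properties. A hedged sketch of tuning them, assuming these standard property names rather than anything this test actually overrides:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact above
        conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact above
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio 1.200000 above
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio 5.000000 above
      }
    }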
2024-12-11T20:13:39,709 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T20:13:39,711 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740 2024-12-11T20:13:39,715 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740 2024-12-11T20:13:39,717 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T20:13:39,717 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T20:13:39,718 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T20:13:39,720 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T20:13:39,722 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72537543, jitterRate=0.0808936208486557}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T20:13:39,722 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-11T20:13:39,723 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733948019693Writing region info on filesystem at 1733948019693Initializing all the Stores at 1733948019695 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948019696 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948019696Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733948019696Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948019696Cleaning up temporary data from old regions at 1733948019717 (+21 ms)Running coprocessor post-open hooks at 1733948019722 (+5 ms)Region opened successfully at 1733948019723 (+1 ms) 2024-12-11T20:13:39,729 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733948019631 2024-12-11T20:13:39,738 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T20:13:39,739 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-11T20:13:39,741 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,44703,1733948017700 2024-12-11T20:13:39,743 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5be53b084ac7,44703,1733948017700, state=OPEN 2024-12-11T20:13:39,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T20:13:39,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T20:13:39,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T20:13:39,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T20:13:39,803 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:39,803 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:39,803 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:39,803 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:39,804 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=5be53b084ac7,44703,1733948017700 2024-12-11T20:13:39,816 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T20:13:39,816 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5be53b084ac7,44703,1733948017700 in 339 msec 2024-12-11T20:13:39,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T20:13:39,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 854 msec 2024-12-11T20:13:39,828 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T20:13:39,828 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-11T20:13:39,853 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:13:39,855 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,44703,1733948017700, seqNum=-1] 2024-12-11T20:13:39,896 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:13:39,898 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37051, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:13:39,926 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2280 sec 2024-12-11T20:13:39,927 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733948019926, completionTime=-1 2024-12-11T20:13:39,930 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-11T20:13:39,930 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
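Once hbase:meta is open, clients resolve it much like the "fetched meta region location" lines above. A client-side sketch of that lookup, using generic connection setup rather than the PEWorker's internal registry call:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Roughly the client-side counterpart of the "fetched meta region location" lines above.
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println(loc);
        }
      }
    }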
2024-12-11T20:13:39,960 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-11T20:13:39,960 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733948079960 2024-12-11T20:13:39,960 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733948139960 2024-12-11T20:13:39,960 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 30 msec 2024-12-11T20:13:39,962 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:13:39,970 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,38845,1733948016780-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,970 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,38845,1733948016780-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,970 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,38845,1733948016780-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,972 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5be53b084ac7:38845, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,973 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,973 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:39,978 DEBUG [master/5be53b084ac7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T20:13:40,001 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.197sec 2024-12-11T20:13:40,003 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T20:13:40,004 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T20:13:40,005 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T20:13:40,005 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-11T20:13:40,005 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T20:13:40,006 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,38845,1733948016780-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T20:13:40,007 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,38845,1733948016780-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T20:13:40,012 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-11T20:13:40,013 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T20:13:40,013 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,38845,1733948016780-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:40,040 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b97eda9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:13:40,045 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-11T20:13:40,045 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-11T20:13:40,049 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,38845,-1 for getting cluster id 2024-12-11T20:13:40,053 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:13:40,062 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1681ab22-046c-405d-adf8-ae8657f45af2' 2024-12-11T20:13:40,065 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:13:40,065 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1681ab22-046c-405d-adf8-ae8657f45af2" 2024-12-11T20:13:40,066 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@aa5e6fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:13:40,066 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,38845,-1] 2024-12-11T20:13:40,069 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:13:40,071 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:40,074 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38704, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
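The AbstractRpcClient lines above print the client's effective RPC settings (connectTO=10000, readTO=20000, writeTO=60000). A sketch of the properties those values normally come from, assuming the hbase.ipc.client.socket.timeout.* names; the log only shows the resolved values, so treat the names as an assumption to verify:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcClientConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.ipc.client.socket.timeout.connect", 10000); // connectTO above
        conf.setInt("hbase.ipc.client.socket.timeout.read", 20000);    // readTO above
        conf.setInt("hbase.ipc.client.socket.timeout.write", 60000);   // writeTO above
      }
    }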
2024-12-11T20:13:40,077 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e06e59e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:13:40,078 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:13:40,085 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,44703,1733948017700, seqNum=-1] 2024-12-11T20:13:40,085 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:13:40,088 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58224, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:13:40,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5be53b084ac7,38845,1733948016780 2024-12-11T20:13:40,112 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T20:13:40,116 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5be53b084ac7,38845,1733948016780 2024-12-11T20:13:40,118 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@45b7b835 2024-12-11T20:13:40,119 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T20:13:40,121 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38706, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T20:13:40,125 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38845 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:13:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38845 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-11T20:13:40,135 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:13:40,137 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38845 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-11T20:13:40,138 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:40,140 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:13:40,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38845 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T20:13:40,149 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:40,149 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:40,153 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1152522736_22 at /127.0.0.1:37376 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:43873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37376 dst: /127.0.0.1:43873 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T20:13:40,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-11T20:13:40,163 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
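The "Cannot allocate parity block(index=3/4, policy=RS-3-2-1024k)" warnings above are expected in this test: RS-3-2 needs 3 data plus 2 parity blocks, so ideally 5 datanodes, while this mini cluster only has the three datanodes seen earlier (127.0.0.1:33711, :32959, :43873), leaving the two parity blocks nowhere to go and each block group written degraded. The log's own suggestion is 'hdfs ec -verifyClusterSetup'. A hedged sketch of checking and applying the same policy programmatically through DistributedFileSystem (the path below is illustrative, not one taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class EcPolicySketch {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS points at an HDFS namenode; on a local filesystem the cast fails.
        Configuration conf = new Configuration();
        try (DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf)) {
          Path dir = new Path("/hbase-ec-test"); // illustrative path, not from this log
          dfs.mkdirs(dir);
          // Same policy as in the warnings above; new files under dir become RS-3-2 striped.
          dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");
          System.out.println(dfs.getErasureCodingPolicy(dir));
        }
      }
    }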
2024-12-11T20:13:40,165 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => adee02a271dbc6d8c4bf260a81210912, NAME => 'TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119 2024-12-11T20:13:40,174 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:40,174 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:40,181 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1152522736_22 at /127.0.0.1:46040 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:32959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46040 dst: /127.0.0.1:32959 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T20:13:40,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-11T20:13:40,186 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
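The RegionOpenAndInit line above carries the full descriptor of the table being created: TestHBaseWalOnEC with a single 'cf' family, one region replica, and the default store-file tracker. A sketch of issuing the same create through the Admin API, which is essentially what the earlier "Client=jenkins ... create 'TestHBaseWalOnEC'" request corresponds to (the connection setup here is generic, not this test's HBaseTestingUtil plumbing):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build();
          // Triggers the CreateTableProcedure (pid=4) sequence shown in the log.
          admin.createTable(td);
        }
      }
    }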
2024-12-11T20:13:40,186 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:13:40,187 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing adee02a271dbc6d8c4bf260a81210912, disabling compactions & flushes 2024-12-11T20:13:40,187 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 2024-12-11T20:13:40,187 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 2024-12-11T20:13:40,187 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. after waiting 0 ms 2024-12-11T20:13:40,187 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 2024-12-11T20:13:40,187 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 2024-12-11T20:13:40,187 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for adee02a271dbc6d8c4bf260a81210912: Waiting for close lock at 1733948020187Disabling compacts and flushes for region at 1733948020187Disabling writes for close at 1733948020187Writing region close event to WAL at 1733948020187Closed at 1733948020187 2024-12-11T20:13:40,190 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:13:40,196 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733948020190"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733948020190"}]},"ts":"1733948020190"} 2024-12-11T20:13:40,202 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-11T20:13:40,205 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:13:40,208 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733948020205"}]},"ts":"1733948020205"} 2024-12-11T20:13:40,214 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-11T20:13:40,215 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:13:40,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:13:40,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:13:40,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:13:40,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:13:40,216 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:13:40,217 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:13:40,217 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:13:40,217 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:13:40,217 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:13:40,217 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:13:40,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=adee02a271dbc6d8c4bf260a81210912, ASSIGN}] 2024-12-11T20:13:40,222 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=adee02a271dbc6d8c4bf260a81210912, ASSIGN 2024-12-11T20:13:40,224 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=adee02a271dbc6d8c4bf260a81210912, ASSIGN; state=OFFLINE, location=5be53b084ac7,44703,1733948017700; forceNewPlan=false, retain=false 2024-12-11T20:13:40,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38845 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T20:13:40,381 INFO [5be53b084ac7:38845 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
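Editor's note: the balancer entries above show the single region being assigned to one of the three region servers. Once the open completes (a few entries below, on 5be53b084ac7,44703), a client can observe where it landed through the region locator, which is essentially what the later "fetched location ... hostname=5be53b084ac7,44703" entry records. A hedged, illustrative sketch; only the table name and row key come from the log.

// Hedged sketch: asking the client where TestHBaseWalOnEC's region was assigned.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class WhereIsMyRegion {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row"));
      // Prints the region's encoded name and the hosting region server.
      System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
    }
  }
}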
2024-12-11T20:13:40,383 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=adee02a271dbc6d8c4bf260a81210912, regionState=OPENING, regionLocation=5be53b084ac7,44703,1733948017700 2024-12-11T20:13:40,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=adee02a271dbc6d8c4bf260a81210912, ASSIGN because future has completed 2024-12-11T20:13:40,388 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure adee02a271dbc6d8c4bf260a81210912, server=5be53b084ac7,44703,1733948017700}] 2024-12-11T20:13:40,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38845 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T20:13:40,551 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 2024-12-11T20:13:40,551 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => adee02a271dbc6d8c4bf260a81210912, NAME => 'TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912.', STARTKEY => '', ENDKEY => ''} 2024-12-11T20:13:40,552 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC adee02a271dbc6d8c4bf260a81210912 2024-12-11T20:13:40,552 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:13:40,553 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for adee02a271dbc6d8c4bf260a81210912 2024-12-11T20:13:40,553 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for adee02a271dbc6d8c4bf260a81210912 2024-12-11T20:13:40,555 INFO [StoreOpener-adee02a271dbc6d8c4bf260a81210912-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region adee02a271dbc6d8c4bf260a81210912 2024-12-11T20:13:40,557 INFO [StoreOpener-adee02a271dbc6d8c4bf260a81210912-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region adee02a271dbc6d8c4bf260a81210912 columnFamilyName cf 2024-12-11T20:13:40,557 DEBUG [StoreOpener-adee02a271dbc6d8c4bf260a81210912-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:40,558 INFO [StoreOpener-adee02a271dbc6d8c4bf260a81210912-1 {}] regionserver.HStore(327): Store=adee02a271dbc6d8c4bf260a81210912/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:13:40,558 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for adee02a271dbc6d8c4bf260a81210912 2024-12-11T20:13:40,560 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/default/TestHBaseWalOnEC/adee02a271dbc6d8c4bf260a81210912 2024-12-11T20:13:40,560 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/default/TestHBaseWalOnEC/adee02a271dbc6d8c4bf260a81210912 2024-12-11T20:13:40,561 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for adee02a271dbc6d8c4bf260a81210912 2024-12-11T20:13:40,561 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for adee02a271dbc6d8c4bf260a81210912 2024-12-11T20:13:40,563 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for adee02a271dbc6d8c4bf260a81210912 2024-12-11T20:13:40,567 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/default/TestHBaseWalOnEC/adee02a271dbc6d8c4bf260a81210912/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:13:40,568 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened adee02a271dbc6d8c4bf260a81210912; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63224903, jitterRate=-0.05787552893161774}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:13:40,568 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for adee02a271dbc6d8c4bf260a81210912 2024-12-11T20:13:40,569 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for adee02a271dbc6d8c4bf260a81210912: Running coprocessor pre-open hook at 1733948020553Writing region info on filesystem at 1733948020553Initializing all the Stores at 1733948020555 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733948020555Cleaning up temporary data from old regions at 1733948020561 (+6 ms)Running coprocessor post-open hooks at 1733948020568 (+7 ms)Region opened successfully at 1733948020569 (+1 ms) 2024-12-11T20:13:40,571 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912., pid=6, masterSystemTime=1733948020542 2024-12-11T20:13:40,575 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 2024-12-11T20:13:40,575 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 2024-12-11T20:13:40,577 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=adee02a271dbc6d8c4bf260a81210912, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,44703,1733948017700 2024-12-11T20:13:40,581 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure adee02a271dbc6d8c4bf260a81210912, server=5be53b084ac7,44703,1733948017700 because future has completed 2024-12-11T20:13:40,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T20:13:40,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure adee02a271dbc6d8c4bf260a81210912, server=5be53b084ac7,44703,1733948017700 in 195 msec 2024-12-11T20:13:40,591 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T20:13:40,592 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=adee02a271dbc6d8c4bf260a81210912, ASSIGN in 370 msec 2024-12-11T20:13:40,593 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:13:40,593 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733948020593"}]},"ts":"1733948020593"} 2024-12-11T20:13:40,596 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-11T20:13:40,597 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:13:40,601 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 469 msec 
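Editor's note: with CreateTableProcedure pid=4 finished, the next entries show the test waiting for assignment before writing ("Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms"). A hedged fragment of what that wait typically looks like; the method name mirrors the long-standing HBaseTestingUtility API and is assumed to carry over to the HBaseTestingUtil class this log references, and the util instance is assumed to be the test's already-started cluster handle.

// Hedged sketch of the wait step suggested by the HBaseTestingUtil entries below.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;

final class WaitForAssignment {
  static void await(HBaseTestingUtil util) throws Exception {
    // Blocks until every region of the table is assigned, or 60 s elapse.
    util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"), 60_000);
  }
}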
2024-12-11T20:13:40,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38845 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T20:13:40,774 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-11T20:13:40,774 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T20:13:40,775 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:13:40,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-11T20:13:40,783 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:13:40,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-11T20:13:40,794 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912., hostname=5be53b084ac7,44703,1733948017700, seqNum=2] 2024-12-11T20:13:40,804 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38845 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-11T20:13:40,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38845 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-11T20:13:40,810 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-11T20:13:40,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38845 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T20:13:40,812 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T20:13:40,813 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T20:13:40,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38845 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T20:13:40,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44703 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-11T20:13:40,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 
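Editor's note: the FlushTableProcedure stored above (pid=7) is the master-side half of a client admin call; the flush entries below reveal that a single cell row/cf:cq had been written just before. A hedged sketch of that client sequence; the connection setup and the cell value are illustrative, everything else matches the names in the log.

// Hedged sketch: write the single cell seen in the flush ("row/cf:cq") and then
// request a table flush, which surfaces as FlushTableProcedure pid=7 above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlush {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      admin.flush(name); // admin-side flush request for the whole table
    }
  }
}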
2024-12-11T20:13:40,980 INFO [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing adee02a271dbc6d8c4bf260a81210912 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-11T20:13:41,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/default/TestHBaseWalOnEC/adee02a271dbc6d8c4bf260a81210912/.tmp/cf/6c3088fef0a1435f958c01a79833ba48 is 36, key is row/cf:cq/1733948020796/Put/seqid=0 2024-12-11T20:13:41,042 WARN [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,042 WARN [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,047 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_626769337_22 at /127.0.0.1:42886 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42886 dst: /127.0.0.1:33711 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T20:13:41,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33711 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-11T20:13:41,056 WARN [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T20:13:41,056 INFO [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/default/TestHBaseWalOnEC/adee02a271dbc6d8c4bf260a81210912/.tmp/cf/6c3088fef0a1435f958c01a79833ba48 2024-12-11T20:13:41,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/default/TestHBaseWalOnEC/adee02a271dbc6d8c4bf260a81210912/.tmp/cf/6c3088fef0a1435f958c01a79833ba48 as hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/default/TestHBaseWalOnEC/adee02a271dbc6d8c4bf260a81210912/cf/6c3088fef0a1435f958c01a79833ba48 2024-12-11T20:13:41,107 INFO [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/default/TestHBaseWalOnEC/adee02a271dbc6d8c4bf260a81210912/cf/6c3088fef0a1435f958c01a79833ba48, entries=1, sequenceid=5, filesize=4.7 K 2024-12-11T20:13:41,113 INFO [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for adee02a271dbc6d8c4bf260a81210912 in 132ms, sequenceid=5, compaction requested=false 2024-12-11T20:13:41,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-11T20:13:41,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for adee02a271dbc6d8c4bf260a81210912: 2024-12-11T20:13:41,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 
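Editor's note: the flush above committed a ~4.7 K HFile for the ~32 B memstore and reported sequenceid=5. A hedged read-back sketch to confirm the cell is served from the flushed store file; only the table, row, family and qualifier names come from the log, the connection setup is illustrative.

// Hedged sketch: read back the flushed cell.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadBack {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestHBaseWalOnEC"))) {
      Result r = table.get(new Get(Bytes.toBytes("row")));
      byte[] v = r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("cq"));
      System.out.println(v == null ? "missing" : Bytes.toString(v));
    }
  }
}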
2024-12-11T20:13:41,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-11T20:13:41,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38845 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-11T20:13:41,126 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-11T20:13:41,127 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 308 msec 2024-12-11T20:13:41,131 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 323 msec 2024-12-11T20:13:41,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38845 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T20:13:41,132 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T20:13:41,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-11T20:13:41,147 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T20:13:41,147 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:13:41,151 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:41,152 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:41,152 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:13:41,152 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-11T20:13:41,152 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=519361184, stopped=false 2024-12-11T20:13:41,153 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5be53b084ac7,38845,1733948016780 2024-12-11T20:13:41,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:41,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:41,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:41,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:41,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:41,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, 
quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:41,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:41,168 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T20:13:41,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:41,169 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T20:13:41,169 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:41,169 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:13:41,169 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:41,169 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:41,169 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:41,170 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5be53b084ac7,39105,1733948017497' ***** 2024-12-11T20:13:41,170 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:41,170 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T20:13:41,170 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5be53b084ac7,41591,1733948017637' ***** 2024-12-11T20:13:41,170 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T20:13:41,170 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T20:13:41,171 INFO [RS:1;5be53b084ac7:41591 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T20:13:41,171 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T20:13:41,171 INFO [RS:1;5be53b084ac7:41591 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
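Editor's note: the call stacks in this shutdown section all bottom out in TestHBaseWalOnEC.tearDown invoking HBaseTestingUtil.shutdownMiniCluster, which is what triggers the "Shutting down minicluster" and region-server STOPPING entries. A hedged JUnit 4 fragment of what such a teardown commonly looks like; the UTIL field name and the annotation choice are assumptions, and the cluster is assumed to have been started in a matching setup method.

// Hedged sketch of a teardown consistent with the stack traces in this section.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class TearDownSketch {
  // Assumed to be the same utility instance that started the minicluster earlier.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Stops the HBase minicluster plus the backing HDFS and ZooKeeper miniclusters.
    UTIL.shutdownMiniCluster();
  }
}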
2024-12-11T20:13:41,171 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5be53b084ac7,44703,1733948017700' ***** 2024-12-11T20:13:41,171 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T20:13:41,171 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T20:13:41,171 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.HRegionServer(959): stopping server 5be53b084ac7,41591,1733948017637 2024-12-11T20:13:41,171 INFO [RS:0;5be53b084ac7:39105 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T20:13:41,171 INFO [RS:1;5be53b084ac7:41591 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T20:13:41,171 INFO [RS:0;5be53b084ac7:39105 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T20:13:41,171 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.HRegionServer(959): stopping server 5be53b084ac7,39105,1733948017497 2024-12-11T20:13:41,171 INFO [RS:1;5be53b084ac7:41591 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;5be53b084ac7:41591. 2024-12-11T20:13:41,171 INFO [RS:0;5be53b084ac7:39105 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T20:13:41,171 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T20:13:41,171 DEBUG [RS:1;5be53b084ac7:41591 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:13:41,171 INFO [RS:0;5be53b084ac7:39105 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5be53b084ac7:39105. 
2024-12-11T20:13:41,171 DEBUG [RS:1;5be53b084ac7:41591 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:41,171 DEBUG [RS:0;5be53b084ac7:39105 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:13:41,172 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T20:13:41,172 DEBUG [RS:0;5be53b084ac7:39105 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:41,172 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.HRegionServer(976): stopping server 5be53b084ac7,41591,1733948017637; all regions closed. 2024-12-11T20:13:41,172 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.HRegionServer(976): stopping server 5be53b084ac7,39105,1733948017497; all regions closed. 2024-12-11T20:13:41,172 INFO [RS:2;5be53b084ac7:44703 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T20:13:41,172 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T20:13:41,172 INFO [RS:2;5be53b084ac7:44703 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T20:13:41,172 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(3091): Received CLOSE for adee02a271dbc6d8c4bf260a81210912 2024-12-11T20:13:41,173 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(959): stopping server 5be53b084ac7,44703,1733948017700 2024-12-11T20:13:41,173 INFO [RS:2;5be53b084ac7:44703 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T20:13:41,173 INFO [RS:2;5be53b084ac7:44703 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;5be53b084ac7:44703. 
2024-12-11T20:13:41,173 DEBUG [RS:2;5be53b084ac7:44703 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:13:41,173 DEBUG [RS:2;5be53b084ac7:44703 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:41,173 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing adee02a271dbc6d8c4bf260a81210912, disabling compactions & flushes 2024-12-11T20:13:41,173 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 2024-12-11T20:13:41,173 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T20:13:41,173 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 2024-12-11T20:13:41,173 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T20:13:41,173 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. after waiting 0 ms 2024-12-11T20:13:41,173 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T20:13:41,173 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 
2024-12-11T20:13:41,173 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-11T20:13:41,174 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-11T20:13:41,174 DEBUG [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, adee02a271dbc6d8c4bf260a81210912=TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912.} 2024-12-11T20:13:41,174 DEBUG [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, adee02a271dbc6d8c4bf260a81210912 2024-12-11T20:13:41,177 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T20:13:41,177 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T20:13:41,178 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T20:13:41,178 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T20:13:41,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33711 is added to blk_1073741826_1016 (size=93) 2024-12-11T20:13:41,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33711 is added to blk_1073741827_1017 (size=93) 2024-12-11T20:13:41,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_1073741826_1016 (size=93) 2024-12-11T20:13:41,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_1073741827_1017 (size=93) 2024-12-11T20:13:41,178 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T20:13:41,179 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-11T20:13:41,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741827_1017 (size=93) 2024-12-11T20:13:41,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741826_1016 (size=93) 2024-12-11T20:13:41,186 DEBUG [RS:1;5be53b084ac7:41591 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/oldWALs 2024-12-11T20:13:41,186 INFO [RS:1;5be53b084ac7:41591 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5be53b084ac7%2C41591%2C1733948017637:(num 1733948019216) 2024-12-11T20:13:41,186 DEBUG [RS:0;5be53b084ac7:39105 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/oldWALs 2024-12-11T20:13:41,186 DEBUG [RS:1;5be53b084ac7:41591 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:41,186 INFO [RS:0;5be53b084ac7:39105 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 
5be53b084ac7%2C39105%2C1733948017497:(num 1733948019214) 2024-12-11T20:13:41,186 DEBUG [RS:0;5be53b084ac7:39105 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:41,186 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:13:41,186 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:13:41,186 INFO [RS:0;5be53b084ac7:39105 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T20:13:41,186 INFO [RS:1;5be53b084ac7:41591 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T20:13:41,187 INFO [RS:0;5be53b084ac7:39105 {}] hbase.ChoreService(370): Chore service for: regionserver/5be53b084ac7:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-11T20:13:41,187 INFO [RS:1;5be53b084ac7:41591 {}] hbase.ChoreService(370): Chore service for: regionserver/5be53b084ac7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T20:13:41,187 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T20:13:41,187 INFO [regionserver/5be53b084ac7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T20:13:41,187 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T20:13:41,187 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T20:13:41,187 INFO [regionserver/5be53b084ac7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T20:13:41,187 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T20:13:41,187 INFO [RS:1;5be53b084ac7:41591 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T20:13:41,188 INFO [RS:1;5be53b084ac7:41591 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41591 2024-12-11T20:13:41,187 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T20:13:41,189 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-11T20:13:41,189 INFO [RS:0;5be53b084ac7:39105 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T20:13:41,189 INFO [RS:0;5be53b084ac7:39105 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39105 2024-12-11T20:13:41,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T20:13:41,193 INFO [RS:0;5be53b084ac7:39105 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T20:13:41,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5be53b084ac7,41591,1733948017637 2024-12-11T20:13:41,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5be53b084ac7,39105,1733948017497 2024-12-11T20:13:41,193 INFO [RS:1;5be53b084ac7:41591 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T20:13:41,202 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5be53b084ac7,41591,1733948017637] 2024-12-11T20:13:41,205 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/default/TestHBaseWalOnEC/adee02a271dbc6d8c4bf260a81210912/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T20:13:41,208 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 2024-12-11T20:13:41,208 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for adee02a271dbc6d8c4bf260a81210912: Waiting for close lock at 1733948021173Running coprocessor pre-close hooks at 1733948021173Disabling compacts and flushes for region at 1733948021173Disabling writes for close at 1733948021173Writing region close event to WAL at 1733948021175 (+2 ms)Running coprocessor post-close hooks at 1733948021206 (+31 ms)Closed at 1733948021208 (+2 ms) 2024-12-11T20:13:41,209 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912. 
2024-12-11T20:13:41,210 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5be53b084ac7,41591,1733948017637 already deleted, retry=false 2024-12-11T20:13:41,210 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5be53b084ac7,41591,1733948017637 expired; onlineServers=2 2024-12-11T20:13:41,210 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5be53b084ac7,39105,1733948017497] 2024-12-11T20:13:41,218 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5be53b084ac7,39105,1733948017497 already deleted, retry=false 2024-12-11T20:13:41,218 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5be53b084ac7,39105,1733948017497 expired; onlineServers=1 2024-12-11T20:13:41,221 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/.tmp/info/bfa636c7dbe146799756ddfce12334bd is 153, key is TestHBaseWalOnEC,,1733948020122.adee02a271dbc6d8c4bf260a81210912./info:regioninfo/1733948020576/Put/seqid=0 2024-12-11T20:13:41,224 WARN [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,224 WARN [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,231 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_626769337_22 at /127.0.0.1:50410 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:43873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50410 dst: /127.0.0.1:43873 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T20:13:41,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-11T20:13:41,235 WARN [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T20:13:41,236 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/.tmp/info/bfa636c7dbe146799756ddfce12334bd 2024-12-11T20:13:41,258 INFO [regionserver/5be53b084ac7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:13:41,264 INFO [regionserver/5be53b084ac7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:13:41,264 INFO [regionserver/5be53b084ac7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:13:41,266 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/.tmp/ns/a48fd45e0b3e447ea2daed2d5fe13a20 is 43, key is default/ns:d/1733948019902/Put/seqid=0 2024-12-11T20:13:41,269 WARN [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,269 WARN [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,272 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_626769337_22 at /127.0.0.1:53344 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:32959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53344 dst: /127.0.0.1:32959 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T20:13:41,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-11T20:13:41,280 WARN [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T20:13:41,280 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/.tmp/ns/a48fd45e0b3e447ea2daed2d5fe13a20 2024-12-11T20:13:41,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:41,303 INFO [RS:1;5be53b084ac7:41591 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T20:13:41,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39105-0x1001690c3c70001, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:41,303 INFO [RS:0;5be53b084ac7:39105 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T20:13:41,303 INFO [RS:1;5be53b084ac7:41591 {}] regionserver.HRegionServer(1031): Exiting; stopping=5be53b084ac7,41591,1733948017637; zookeeper connection closed. 2024-12-11T20:13:41,303 INFO [RS:0;5be53b084ac7:39105 {}] regionserver.HRegionServer(1031): Exiting; stopping=5be53b084ac7,39105,1733948017497; zookeeper connection closed. 
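[Editorial note] The repeated "Cannot allocate parity block" warnings are expected here: the files are written with the RS-3-2-1024k erasure coding policy, which needs 3 data + 2 parity = 5 DataNodes per block group, while this mini-cluster runs only 3 DataNodes (numDataNodes=3 in the cluster options logged further down), so the two parity blocks cannot be placed. Besides the `hdfs ec -verifyClusterSetup` command the warning itself points to, the same check can be made programmatically, roughly as below; the directory and fs.defaultFS are assumptions for illustration, not values taken from the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcSetupCheck {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at the cluster under test, e.g. hdfs://localhost:39591.
    Configuration conf = new Configuration();
    Path dir = new Path("/user/jenkins/test-data");            // hypothetical directory

    try (DistributedFileSystem dfs =
             (DistributedFileSystem) dir.getFileSystem(conf)) {
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      if (policy == null) {
        System.out.println(dir + " uses plain replication, no EC policy set");
        return;
      }
      // RS-3-2-1024k => 3 data units + 2 parity units => at least 5 DataNodes.
      int required = policy.getNumDataUnits() + policy.getNumParityUnits();
      int live = dfs.getDataNodeStats().length;                // live DataNodes per the NameNode
      System.out.printf("policy=%s needs %d DataNodes, cluster reports %d live%n",
          policy.getName(), required, live);
    }
  }
}
```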
2024-12-11T20:13:41,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:41,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41591-0x1001690c3c70002, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:41,303 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4218998f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4218998f 2024-12-11T20:13:41,305 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@24760161 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@24760161 2024-12-11T20:13:41,314 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/.tmp/table/4f94bc5e343644bcb39adf4c34490071 is 52, key is TestHBaseWalOnEC/table:state/1733948020593/Put/seqid=0 2024-12-11T20:13:41,316 WARN [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,316 WARN [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,320 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_626769337_22 at /127.0.0.1:53358 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:32959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53358 dst: /127.0.0.1:32959 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T20:13:41,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-11T20:13:41,325 WARN [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T20:13:41,325 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/.tmp/table/4f94bc5e343644bcb39adf4c34490071 2024-12-11T20:13:41,336 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/.tmp/info/bfa636c7dbe146799756ddfce12334bd as hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/info/bfa636c7dbe146799756ddfce12334bd 2024-12-11T20:13:41,346 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/info/bfa636c7dbe146799756ddfce12334bd, entries=10, sequenceid=11, filesize=6.5 K 2024-12-11T20:13:41,348 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/.tmp/ns/a48fd45e0b3e447ea2daed2d5fe13a20 as hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/ns/a48fd45e0b3e447ea2daed2d5fe13a20 2024-12-11T20:13:41,359 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/ns/a48fd45e0b3e447ea2daed2d5fe13a20, entries=2, sequenceid=11, filesize=5.0 K 2024-12-11T20:13:41,361 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/.tmp/table/4f94bc5e343644bcb39adf4c34490071 as hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/table/4f94bc5e343644bcb39adf4c34490071 2024-12-11T20:13:41,375 DEBUG [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T20:13:41,376 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/table/4f94bc5e343644bcb39adf4c34490071, entries=2, sequenceid=11, filesize=5.1 K 
2024-12-11T20:13:41,379 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 201ms, sequenceid=11, compaction requested=false 2024-12-11T20:13:41,379 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-11T20:13:41,399 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-11T20:13:41,400 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T20:13:41,400 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T20:13:41,400 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733948021177Running coprocessor pre-close hooks at 1733948021177Disabling compacts and flushes for region at 1733948021177Disabling writes for close at 1733948021178 (+1 ms)Obtaining lock to block concurrent updates at 1733948021179 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733948021179Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733948021180 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733948021181 (+1 ms)Flushing 1588230740/info: creating writer at 1733948021182 (+1 ms)Flushing 1588230740/info: appending metadata at 1733948021219 (+37 ms)Flushing 1588230740/info: closing flushed file at 1733948021219Flushing 1588230740/ns: creating writer at 1733948021246 (+27 ms)Flushing 1588230740/ns: appending metadata at 1733948021265 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1733948021265Flushing 1588230740/table: creating writer at 1733948021292 (+27 ms)Flushing 1588230740/table: appending metadata at 1733948021312 (+20 ms)Flushing 1588230740/table: closing flushed file at 1733948021312Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b3724f5: reopening flushed file at 1733948021335 (+23 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1827d165: reopening flushed file at 1733948021346 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@428e0cb2: reopening flushed file at 1733948021359 (+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 201ms, sequenceid=11, compaction requested=false at 1733948021379 (+20 ms)Writing region close event to WAL at 1733948021387 (+8 ms)Running coprocessor post-close hooks at 1733948021400 (+13 ms)Closed at 1733948021400 2024-12-11T20:13:41,400 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-11T20:13:41,575 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(976): stopping server 5be53b084ac7,44703,1733948017700; all 
regions closed. 2024-12-11T20:13:41,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33711 is added to blk_1073741829_1019 (size=2751) 2024-12-11T20:13:41,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_1073741829_1019 (size=2751) 2024-12-11T20:13:41,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741829_1019 (size=2751) 2024-12-11T20:13:41,583 DEBUG [RS:2;5be53b084ac7:44703 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/oldWALs 2024-12-11T20:13:41,583 INFO [RS:2;5be53b084ac7:44703 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5be53b084ac7%2C44703%2C1733948017700.meta:.meta(num 1733948019656) 2024-12-11T20:13:41,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33711 is added to blk_1073741828_1018 (size=1298) 2024-12-11T20:13:41,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741828_1018 (size=1298) 2024-12-11T20:13:41,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_1073741828_1018 (size=1298) 2024-12-11T20:13:41,590 DEBUG [RS:2;5be53b084ac7:44703 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/oldWALs 2024-12-11T20:13:41,590 INFO [RS:2;5be53b084ac7:44703 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5be53b084ac7%2C44703%2C1733948017700:(num 1733948019218) 2024-12-11T20:13:41,590 DEBUG [RS:2;5be53b084ac7:44703 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:41,590 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:13:41,590 INFO [RS:2;5be53b084ac7:44703 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T20:13:41,590 INFO [RS:2;5be53b084ac7:44703 {}] hbase.ChoreService(370): Chore service for: regionserver/5be53b084ac7:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T20:13:41,590 INFO [RS:2;5be53b084ac7:44703 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T20:13:41,590 INFO [regionserver/5be53b084ac7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
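[Editorial note] The HFileWriterImpl / DefaultStoreFlusher / "Committing ... .tmp/... as ..." sequence further above is the normal memstore flush: each store writes a temporary HFile under .tmp and then commits it into the column family directory, here triggered by closing hbase:meta during shutdown. The same flush path can be exercised explicitly from a client, roughly as in the sketch below; the table name is the one from this test, the connection setup is assumed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // expects hbase-site.xml on the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks every hosting region server to flush the table's memstores to HFiles,
      // the same write-to-.tmp-then-commit path seen in the close-time flushes above.
      admin.flush(TableName.valueOf("TestHBaseWalOnEC"));
    }
  }
}
```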
2024-12-11T20:13:41,590 INFO [RS:2;5be53b084ac7:44703 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44703 2024-12-11T20:13:41,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-11T20:13:41,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33711 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-11T20:13:41,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-11T20:13:41,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-11T20:13:41,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33711 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-11T20:13:41,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5be53b084ac7,44703,1733948017700 2024-12-11T20:13:41,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T20:13:41,627 INFO [RS:2;5be53b084ac7:44703 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T20:13:41,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33711 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-11T20:13:41,628 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5be53b084ac7,44703,1733948017700] 2024-12-11T20:13:41,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-11T20:13:41,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-11T20:13:41,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-11T20:13:41,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-11T20:13:41,643 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5be53b084ac7,44703,1733948017700 already deleted, retry=false 2024-12-11T20:13:41,643 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5be53b084ac7,44703,1733948017700 expired; onlineServers=0 2024-12-11T20:13:41,643 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5be53b084ac7,38845,1733948016780' ***** 2024-12-11T20:13:41,643 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T20:13:41,643 INFO [M:0;5be53b084ac7:38845 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T20:13:41,644 INFO 
[M:0;5be53b084ac7:38845 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T20:13:41,644 DEBUG [M:0;5be53b084ac7:38845 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-11T20:13:41,644 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-11T20:13:41,644 DEBUG [M:0;5be53b084ac7:38845 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T20:13:41,644 DEBUG [master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.small.0-1733948018819 {}] cleaner.HFileCleaner(306): Exit Thread[master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.small.0-1733948018819,5,FailOnTimeoutGroup] 2024-12-11T20:13:41,644 DEBUG [master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.large.0-1733948018816 {}] cleaner.HFileCleaner(306): Exit Thread[master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.large.0-1733948018816,5,FailOnTimeoutGroup] 2024-12-11T20:13:41,644 INFO [M:0;5be53b084ac7:38845 {}] hbase.ChoreService(370): Chore service for: master/5be53b084ac7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-11T20:13:41,644 INFO [M:0;5be53b084ac7:38845 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T20:13:41,644 DEBUG [M:0;5be53b084ac7:38845 {}] master.HMaster(1795): Stopping service threads 2024-12-11T20:13:41,644 INFO [M:0;5be53b084ac7:38845 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T20:13:41,644 INFO [M:0;5be53b084ac7:38845 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T20:13:41,645 INFO [M:0;5be53b084ac7:38845 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T20:13:41,645 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-11T20:13:41,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T20:13:41,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:41,652 DEBUG [M:0;5be53b084ac7:38845 {}] zookeeper.ZKUtil(347): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-11T20:13:41,652 WARN [M:0;5be53b084ac7:38845 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-11T20:13:41,653 INFO [M:0;5be53b084ac7:38845 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/.lastflushedseqids 2024-12-11T20:13:41,664 WARN [M:0;5be53b084ac7:38845 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-11T20:13:41,664 WARN [M:0;5be53b084ac7:38845 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,666 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1152522736_22 at /127.0.0.1:50472 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:43873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50472 dst: /127.0.0.1:43873 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T20:13:41,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-11T20:13:41,670 WARN [M:0;5be53b084ac7:38845 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T20:13:41,670 INFO [M:0;5be53b084ac7:38845 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-11T20:13:41,670 INFO [M:0;5be53b084ac7:38845 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-11T20:13:41,671 DEBUG [M:0;5be53b084ac7:38845 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T20:13:41,671 INFO [M:0;5be53b084ac7:38845 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:13:41,671 DEBUG [M:0;5be53b084ac7:38845 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:13:41,671 DEBUG [M:0;5be53b084ac7:38845 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T20:13:41,671 DEBUG [M:0;5be53b084ac7:38845 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-11T20:13:41,671 INFO [M:0;5be53b084ac7:38845 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-11T20:13:41,689 DEBUG [M:0;5be53b084ac7:38845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4c87d9f7ca834329b3ea13fd2cf46973 is 82, key is hbase:meta,,1/info:regioninfo/1733948019740/Put/seqid=0 2024-12-11T20:13:41,692 WARN [M:0;5be53b084ac7:38845 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,692 WARN [M:0;5be53b084ac7:38845 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,695 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1152522736_22 at /127.0.0.1:42946 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:33711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42946 dst: /127.0.0.1:33711 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T20:13:41,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33711 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-11T20:13:41,701 WARN [M:0;5be53b084ac7:38845 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T20:13:41,701 INFO [M:0;5be53b084ac7:38845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4c87d9f7ca834329b3ea13fd2cf46973 2024-12-11T20:13:41,729 DEBUG [M:0;5be53b084ac7:38845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ba584f0bbcb84a91b5b9dc1b552450bd is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733948020600/Put/seqid=0 2024-12-11T20:13:41,731 WARN [M:0;5be53b084ac7:38845 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,731 WARN [M:0;5be53b084ac7:38845 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,734 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1152522736_22 at /127.0.0.1:50490 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:43873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50490 dst: /127.0.0.1:43873 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T20:13:41,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:41,735 INFO [RS:2;5be53b084ac7:44703 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T20:13:41,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44703-0x1001690c3c70003, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:41,735 INFO [RS:2;5be53b084ac7:44703 {}] regionserver.HRegionServer(1031): Exiting; stopping=5be53b084ac7,44703,1733948017700; zookeeper connection closed. 2024-12-11T20:13:41,736 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@79efe7db {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@79efe7db 2024-12-11T20:13:41,736 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-11T20:13:41,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_-9223372036854775552_1037 (size=6440) 2024-12-11T20:13:41,738 WARN [M:0;5be53b084ac7:38845 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T20:13:41,739 INFO [M:0;5be53b084ac7:38845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ba584f0bbcb84a91b5b9dc1b552450bd 2024-12-11T20:13:41,760 DEBUG [M:0;5be53b084ac7:38845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/320882b6978b4f2e8e0c633934e6054e is 69, key is 5be53b084ac7,39105,1733948017497/rs:state/1733948018870/Put/seqid=0 2024-12-11T20:13:41,761 WARN [M:0;5be53b084ac7:38845 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,761 WARN [M:0;5be53b084ac7:38845 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T20:13:41,764 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1152522736_22 at /127.0.0.1:50502 [Receiving block BP-1304524823-172.17.0.2-1733948012709:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:43873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50502 dst: /127.0.0.1:43873 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T20:13:41,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-11T20:13:41,768 WARN [M:0;5be53b084ac7:38845 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T20:13:41,768 INFO [M:0;5be53b084ac7:38845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/320882b6978b4f2e8e0c633934e6054e 2024-12-11T20:13:41,777 DEBUG [M:0;5be53b084ac7:38845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4c87d9f7ca834329b3ea13fd2cf46973 as hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4c87d9f7ca834329b3ea13fd2cf46973 2024-12-11T20:13:41,783 INFO [M:0;5be53b084ac7:38845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4c87d9f7ca834329b3ea13fd2cf46973, entries=8, sequenceid=72, filesize=5.5 K 2024-12-11T20:13:41,785 DEBUG [M:0;5be53b084ac7:38845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ba584f0bbcb84a91b5b9dc1b552450bd as hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ba584f0bbcb84a91b5b9dc1b552450bd 2024-12-11T20:13:41,792 INFO [M:0;5be53b084ac7:38845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ba584f0bbcb84a91b5b9dc1b552450bd, entries=8, sequenceid=72, filesize=6.3 K 2024-12-11T20:13:41,793 DEBUG [M:0;5be53b084ac7:38845 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/320882b6978b4f2e8e0c633934e6054e as hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/320882b6978b4f2e8e0c633934e6054e 2024-12-11T20:13:41,801 INFO [M:0;5be53b084ac7:38845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/320882b6978b4f2e8e0c633934e6054e, entries=3, sequenceid=72, filesize=5.2 K 2024-12-11T20:13:41,802 INFO [M:0;5be53b084ac7:38845 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=72, compaction requested=false 2024-12-11T20:13:41,803 INFO [M:0;5be53b084ac7:38845 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:13:41,803 DEBUG [M:0;5be53b084ac7:38845 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733948021671Disabling compacts and flushes for region at 1733948021671Disabling writes for close at 1733948021671Obtaining lock to block concurrent updates at 1733948021671Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733948021671Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733948021671Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733948021672 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733948021672Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733948021689 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733948021689Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733948021709 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733948021729 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733948021729Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733948021746 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733948021759 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733948021759Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7610379: reopening flushed file at 1733948021775 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4934d7fa: reopening flushed file at 1733948021784 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67e16bc0: reopening flushed file at 1733948021792 (+8 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=72, compaction requested=false at 1733948021802 (+10 ms)Writing region close event to WAL at 1733948021803 (+1 ms)Closed at 1733948021803 2024-12-11T20:13:41,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32959 is added to blk_1073741825_1011 (size=32686) 2024-12-11T20:13:41,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33711 is added to blk_1073741825_1011 (size=32686) 2024-12-11T20:13:41,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43873 is added to blk_1073741825_1011 (size=32686) 2024-12-11T20:13:41,808 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T20:13:41,808 INFO [M:0;5be53b084ac7:38845 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-11T20:13:41,808 INFO [M:0;5be53b084ac7:38845 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38845 2024-12-11T20:13:41,808 INFO [M:0;5be53b084ac7:38845 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T20:13:41,952 INFO [M:0;5be53b084ac7:38845 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T20:13:41,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:41,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38845-0x1001690c3c70000, quorum=127.0.0.1:60082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:41,956 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d005cc8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T20:13:41,958 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@492d1201{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T20:13:41,958 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T20:13:41,958 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a4f4410{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T20:13:41,958 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77df1a06{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/hadoop.log.dir/,STOPPED} 2024-12-11T20:13:41,960 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T20:13:41,960 WARN [BP-1304524823-172.17.0.2-1733948012709 heartbeating to localhost/127.0.0.1:39591 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T20:13:41,961 WARN [BP-1304524823-172.17.0.2-1733948012709 heartbeating to localhost/127.0.0.1:39591 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1304524823-172.17.0.2-1733948012709 (Datanode Uuid f0fb4ded-ec4d-4d60-931d-f945b45cd7d6) service to localhost/127.0.0.1:39591 2024-12-11T20:13:41,961 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T20:13:41,962 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/data/data5/current/BP-1304524823-172.17.0.2-1733948012709 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T20:13:41,962 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/data/data6/current/BP-1304524823-172.17.0.2-1733948012709 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T20:13:41,963 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T20:13:41,965 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@51be63ee{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T20:13:41,966 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@106158ef{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T20:13:41,966 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T20:13:41,966 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31fc7e57{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T20:13:41,966 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f0232ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/hadoop.log.dir/,STOPPED} 2024-12-11T20:13:41,967 WARN [BP-1304524823-172.17.0.2-1733948012709 heartbeating to localhost/127.0.0.1:39591 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T20:13:41,968 WARN [BP-1304524823-172.17.0.2-1733948012709 heartbeating to localhost/127.0.0.1:39591 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1304524823-172.17.0.2-1733948012709 (Datanode Uuid b4bcb877-8c42-4b47-b2b4-c6fbb4af4428) service to localhost/127.0.0.1:39591 2024-12-11T20:13:41,968 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/data/data3/current/BP-1304524823-172.17.0.2-1733948012709 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T20:13:41,969 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/data/data4/current/BP-1304524823-172.17.0.2-1733948012709 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T20:13:41,969 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-11T20:13:41,969 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T20:13:41,969 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T20:13:41,971 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@68c42837{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T20:13:41,972 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@736038db{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T20:13:41,972 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T20:13:41,972 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4802e856{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T20:13:41,972 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26fd7980{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/hadoop.log.dir/,STOPPED} 2024-12-11T20:13:41,973 WARN [BP-1304524823-172.17.0.2-1733948012709 heartbeating to localhost/127.0.0.1:39591 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T20:13:41,973 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T20:13:41,973 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T20:13:41,973 WARN [BP-1304524823-172.17.0.2-1733948012709 heartbeating to localhost/127.0.0.1:39591 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1304524823-172.17.0.2-1733948012709 (Datanode Uuid 8ad5220c-4cf3-4aa7-9a31-86d49a2e410d) service to localhost/127.0.0.1:39591 2024-12-11T20:13:41,974 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/data/data1/current/BP-1304524823-172.17.0.2-1733948012709 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T20:13:41,974 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/cluster_8ea2a013-5a77-5a09-3916-877d4fbe9f5c/data/data2/current/BP-1304524823-172.17.0.2-1733948012709 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T20:13:41,975 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T20:13:41,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@753cff0b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T20:13:41,983 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78567fa0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T20:13:41,983 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T20:13:41,983 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c6a701e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T20:13:41,983 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b4eb733{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/hadoop.log.dir/,STOPPED} 2024-12-11T20:13:41,992 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-11T20:13:42,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-11T20:13:42,027 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=85 (was 157), OpenFileDescriptor=439 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1047 (was 1121), ProcessCount=11 (was 11), AvailableMemoryMB=4712 (was 5050) 2024-12-11T20:13:42,033 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=85, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=1047, ProcessCount=11, AvailableMemoryMB=4712 2024-12-11T20:13:42,033 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-11T20:13:42,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/hadoop.log.dir so I do NOT create it in target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59 2024-12-11T20:13:42,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f65357e6-5c86-3670-a7ed-6bc039014a37/hadoop.tmp.dir so I do NOT create it in target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59 2024-12-11T20:13:42,034 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd, deleteOnExit=true 2024-12-11T20:13:42,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-11T20:13:42,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/test.cache.data in system properties and HBase conf 2024-12-11T20:13:42,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T20:13:42,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/hadoop.log.dir in system properties and HBase conf 2024-12-11T20:13:42,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T20:13:42,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T20:13:42,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T20:13:42,035 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-11T20:13:42,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T20:13:42,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T20:13:42,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T20:13:42,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T20:13:42,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T20:13:42,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T20:13:42,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T20:13:42,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T20:13:42,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T20:13:42,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/nfs.dump.dir in system properties and HBase conf 2024-12-11T20:13:42,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/java.io.tmpdir in system properties and HBase conf 2024-12-11T20:13:42,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T20:13:42,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T20:13:42,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T20:13:42,338 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:13:42,346 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T20:13:42,353 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T20:13:42,353 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T20:13:42,353 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T20:13:42,355 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:13:42,356 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7288ff9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/hadoop.log.dir/,AVAILABLE} 2024-12-11T20:13:42,356 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@94f688b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T20:13:42,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64064f3d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/java.io.tmpdir/jetty-localhost-40773-hadoop-hdfs-3_4_1-tests_jar-_-any-13438901188613482810/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T20:13:42,471 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@485e3231{HTTP/1.1, (http/1.1)}{localhost:40773} 2024-12-11T20:13:42,471 INFO [Time-limited test {}] server.Server(415): Started @11744ms 2024-12-11T20:13:42,691 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:13:42,696 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T20:13:42,696 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T20:13:42,697 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T20:13:42,697 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T20:13:42,697 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d0b4a63{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/hadoop.log.dir/,AVAILABLE} 2024-12-11T20:13:42,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4218be64{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T20:13:42,823 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cc92116{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/java.io.tmpdir/jetty-localhost-40713-hadoop-hdfs-3_4_1-tests_jar-_-any-5516664309534294893/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T20:13:42,824 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@1ac2df30{HTTP/1.1, (http/1.1)}{localhost:40713} 2024-12-11T20:13:42,824 INFO [Time-limited test {}] server.Server(415): Started @12097ms 2024-12-11T20:13:42,826 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T20:13:42,868 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:13:42,871 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T20:13:42,872 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T20:13:42,873 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T20:13:42,873 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T20:13:42,873 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c414977{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/hadoop.log.dir/,AVAILABLE} 2024-12-11T20:13:42,873 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@791ac62b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T20:13:42,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a89ba0b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/java.io.tmpdir/jetty-localhost-36089-hadoop-hdfs-3_4_1-tests_jar-_-any-760875366555971467/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T20:13:42,969 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41ae5b4a{HTTP/1.1, (http/1.1)}{localhost:36089} 2024-12-11T20:13:42,970 INFO [Time-limited test {}] server.Server(415): Started @12242ms 2024-12-11T20:13:42,971 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T20:13:43,032 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:13:43,037 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T20:13:43,038 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T20:13:43,038 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T20:13:43,038 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T20:13:43,039 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a8aabef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/hadoop.log.dir/,AVAILABLE} 2024-12-11T20:13:43,039 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29b80c96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T20:13:43,146 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@151d7336{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/java.io.tmpdir/jetty-localhost-44973-hadoop-hdfs-3_4_1-tests_jar-_-any-6871499964463769720/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T20:13:43,147 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2c66e5d3{HTTP/1.1, (http/1.1)}{localhost:44973} 2024-12-11T20:13:43,147 INFO [Time-limited test {}] server.Server(415): Started @12420ms 2024-12-11T20:13:43,150 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T20:13:43,738 WARN [Thread-562 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/data/data1/current/BP-1275173032-172.17.0.2-1733948022061/current, will proceed with Du for space computation calculation, 2024-12-11T20:13:43,738 WARN [Thread-563 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/data/data2/current/BP-1275173032-172.17.0.2-1733948022061/current, will proceed with Du for space computation calculation, 2024-12-11T20:13:43,768 WARN [Thread-504 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T20:13:43,772 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf82712ccb71d43db with lease ID 0x17538aeff47b6590: Processing first storage report for DS-e5d25514-f39a-4db0-8e13-e8bdf243c74a from datanode DatanodeRegistration(127.0.0.1:34453, datanodeUuid=8779e464-33c2-4161-ad6c-472b8e8b1883, infoPort=38789, infoSecurePort=0, ipcPort=39701, storageInfo=lv=-57;cid=testClusterID;nsid=786450454;c=1733948022061) 2024-12-11T20:13:43,772 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf82712ccb71d43db with lease ID 0x17538aeff47b6590: from storage DS-e5d25514-f39a-4db0-8e13-e8bdf243c74a node DatanodeRegistration(127.0.0.1:34453, datanodeUuid=8779e464-33c2-4161-ad6c-472b8e8b1883, infoPort=38789, infoSecurePort=0, ipcPort=39701, storageInfo=lv=-57;cid=testClusterID;nsid=786450454;c=1733948022061), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T20:13:43,772 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf82712ccb71d43db with lease ID 0x17538aeff47b6590: Processing first storage report for DS-daa43ae3-0d6d-4464-9431-e81f48012c4c from datanode DatanodeRegistration(127.0.0.1:34453, datanodeUuid=8779e464-33c2-4161-ad6c-472b8e8b1883, infoPort=38789, infoSecurePort=0, ipcPort=39701, storageInfo=lv=-57;cid=testClusterID;nsid=786450454;c=1733948022061) 2024-12-11T20:13:43,772 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf82712ccb71d43db with lease ID 0x17538aeff47b6590: from storage DS-daa43ae3-0d6d-4464-9431-e81f48012c4c node DatanodeRegistration(127.0.0.1:34453, datanodeUuid=8779e464-33c2-4161-ad6c-472b8e8b1883, infoPort=38789, infoSecurePort=0, ipcPort=39701, storageInfo=lv=-57;cid=testClusterID;nsid=786450454;c=1733948022061), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T20:13:44,060 WARN [Thread-576 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/data/data4/current/BP-1275173032-172.17.0.2-1733948022061/current, will proceed with Du for space computation calculation, 2024-12-11T20:13:44,060 WARN [Thread-575 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/data/data3/current/BP-1275173032-172.17.0.2-1733948022061/current, will proceed with Du for space computation calculation, 2024-12-11T20:13:44,082 WARN [Thread-527 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T20:13:44,089 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd6323fc348913b66 with lease ID 0x17538aeff47b6591: Processing first storage report for DS-82324791-7950-4df5-be2b-2712c0cf564b from datanode DatanodeRegistration(127.0.0.1:44659, datanodeUuid=e741c26c-9e20-4fbb-b36c-31f9c4b7854c, infoPort=36249, infoSecurePort=0, ipcPort=40891, storageInfo=lv=-57;cid=testClusterID;nsid=786450454;c=1733948022061) 2024-12-11T20:13:44,089 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd6323fc348913b66 with lease ID 0x17538aeff47b6591: from storage DS-82324791-7950-4df5-be2b-2712c0cf564b node DatanodeRegistration(127.0.0.1:44659, datanodeUuid=e741c26c-9e20-4fbb-b36c-31f9c4b7854c, infoPort=36249, infoSecurePort=0, ipcPort=40891, storageInfo=lv=-57;cid=testClusterID;nsid=786450454;c=1733948022061), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T20:13:44,089 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd6323fc348913b66 with lease ID 0x17538aeff47b6591: Processing first storage report for DS-76c264fd-07e9-4422-918d-60eb01aa68ca from datanode DatanodeRegistration(127.0.0.1:44659, datanodeUuid=e741c26c-9e20-4fbb-b36c-31f9c4b7854c, infoPort=36249, infoSecurePort=0, ipcPort=40891, storageInfo=lv=-57;cid=testClusterID;nsid=786450454;c=1733948022061) 2024-12-11T20:13:44,090 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd6323fc348913b66 with lease ID 0x17538aeff47b6591: from storage DS-76c264fd-07e9-4422-918d-60eb01aa68ca node DatanodeRegistration(127.0.0.1:44659, datanodeUuid=e741c26c-9e20-4fbb-b36c-31f9c4b7854c, infoPort=36249, infoSecurePort=0, ipcPort=40891, storageInfo=lv=-57;cid=testClusterID;nsid=786450454;c=1733948022061), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T20:13:44,185 WARN [Thread-586 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/data/data5/current/BP-1275173032-172.17.0.2-1733948022061/current, will proceed with Du for space computation calculation, 2024-12-11T20:13:44,185 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/data/data6/current/BP-1275173032-172.17.0.2-1733948022061/current, will proceed with Du for space computation calculation, 2024-12-11T20:13:44,213 WARN [Thread-550 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T20:13:44,223 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67f30c6c6482af2b with lease ID 0x17538aeff47b6592: Processing first storage report for DS-cb829690-6c5c-4110-8240-dcd4f1822ea4 from datanode DatanodeRegistration(127.0.0.1:41547, datanodeUuid=195a5ec9-38f6-410c-b387-ca30d81d46ae, infoPort=33311, infoSecurePort=0, ipcPort=36313, storageInfo=lv=-57;cid=testClusterID;nsid=786450454;c=1733948022061) 2024-12-11T20:13:44,223 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67f30c6c6482af2b with lease ID 0x17538aeff47b6592: from storage DS-cb829690-6c5c-4110-8240-dcd4f1822ea4 node DatanodeRegistration(127.0.0.1:41547, datanodeUuid=195a5ec9-38f6-410c-b387-ca30d81d46ae, infoPort=33311, infoSecurePort=0, ipcPort=36313, storageInfo=lv=-57;cid=testClusterID;nsid=786450454;c=1733948022061), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T20:13:44,223 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67f30c6c6482af2b with lease ID 0x17538aeff47b6592: Processing first storage report for DS-35d90898-9947-409f-80b1-e82376541749 from datanode DatanodeRegistration(127.0.0.1:41547, datanodeUuid=195a5ec9-38f6-410c-b387-ca30d81d46ae, infoPort=33311, infoSecurePort=0, ipcPort=36313, storageInfo=lv=-57;cid=testClusterID;nsid=786450454;c=1733948022061) 2024-12-11T20:13:44,223 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67f30c6c6482af2b with lease ID 0x17538aeff47b6592: from storage DS-35d90898-9947-409f-80b1-e82376541749 node DatanodeRegistration(127.0.0.1:41547, datanodeUuid=195a5ec9-38f6-410c-b387-ca30d81d46ae, infoPort=33311, infoSecurePort=0, ipcPort=36313, storageInfo=lv=-57;cid=testClusterID;nsid=786450454;c=1733948022061), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T20:13:44,290 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59 2024-12-11T20:13:44,307 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/zookeeper_0, clientPort=53625, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-11T20:13:44,331 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53625 2024-12-11T20:13:44,332 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:44,334 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:44,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741825_1001 (size=7) 2024-12-11T20:13:44,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741825_1001 (size=7) 2024-12-11T20:13:44,381 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532 with version=8 2024-12-11T20:13:44,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39591/user/jenkins/test-data/02e66db6-ee0b-0338-51ff-56955f597119/hbase-staging 2024-12-11T20:13:44,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741825_1001 (size=7) 2024-12-11T20:13:44,384 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5be53b084ac7:0 server-side Connection retries=45 2024-12-11T20:13:44,384 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:44,384 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:44,384 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T20:13:44,384 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:44,384 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T20:13:44,384 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-11T20:13:44,384 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T20:13:44,385 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46085 2024-12-11T20:13:44,387 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46085 connecting to ZooKeeper ensemble=127.0.0.1:53625 2024-12-11T20:13:44,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:460850x0, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T20:13:44,459 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46085-0x1001690e4890000 connected 2024-12-11T20:13:44,526 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:44,528 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:44,530 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:44,530 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532, hbase.cluster.distributed=false 2024-12-11T20:13:44,532 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T20:13:44,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46085 2024-12-11T20:13:44,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46085 2024-12-11T20:13:44,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46085 2024-12-11T20:13:44,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46085 2024-12-11T20:13:44,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46085 2024-12-11T20:13:44,549 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5be53b084ac7:0 server-side Connection retries=45 2024-12-11T20:13:44,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:44,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:44,549 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T20:13:44,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:44,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T20:13:44,549 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T20:13:44,550 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T20:13:44,552 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42795 2024-12-11T20:13:44,554 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42795 connecting to ZooKeeper ensemble=127.0.0.1:53625 2024-12-11T20:13:44,555 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:44,557 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:44,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:427950x0, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T20:13:44,568 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:427950x0, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:44,568 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42795-0x1001690e4890001 connected 2024-12-11T20:13:44,569 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T20:13:44,569 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T20:13:44,570 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T20:13:44,573 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T20:13:44,574 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42795 2024-12-11T20:13:44,574 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42795 2024-12-11T20:13:44,574 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42795 2024-12-11T20:13:44,579 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42795 2024-12-11T20:13:44,579 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42795 2024-12-11T20:13:44,604 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5be53b084ac7:0 server-side Connection retries=45 2024-12-11T20:13:44,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:44,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:44,604 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T20:13:44,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:44,604 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T20:13:44,604 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T20:13:44,604 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T20:13:44,611 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42037 2024-12-11T20:13:44,615 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42037 connecting to ZooKeeper ensemble=127.0.0.1:53625 2024-12-11T20:13:44,616 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:44,619 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:44,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:420370x0, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T20:13:44,643 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42037-0x1001690e4890002 connected 2024-12-11T20:13:44,645 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:44,646 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T20:13:44,652 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T20:13:44,659 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T20:13:44,671 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T20:13:44,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42037 2024-12-11T20:13:44,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42037 2024-12-11T20:13:44,679 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42037 2024-12-11T20:13:44,680 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42037 2024-12-11T20:13:44,683 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42037 2024-12-11T20:13:44,700 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5be53b084ac7:0 server-side Connection retries=45 2024-12-11T20:13:44,700 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:44,700 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:44,701 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T20:13:44,701 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T20:13:44,701 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T20:13:44,701 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T20:13:44,701 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T20:13:44,707 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33319 2024-12-11T20:13:44,709 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33319 connecting to ZooKeeper ensemble=127.0.0.1:53625 2024-12-11T20:13:44,709 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:44,711 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:44,743 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:333190x0, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T20:13:44,744 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:333190x0, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:44,745 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T20:13:44,748 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33319-0x1001690e4890003 connected 2024-12-11T20:13:44,750 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T20:13:44,751 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T20:13:44,753 DEBUG [Time-limited 
test {}] zookeeper.ZKUtil(113): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T20:13:44,754 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33319 2024-12-11T20:13:44,754 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33319 2024-12-11T20:13:44,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33319 2024-12-11T20:13:44,761 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33319 2024-12-11T20:13:44,761 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33319 2024-12-11T20:13:44,774 DEBUG [M:0;5be53b084ac7:46085 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5be53b084ac7:46085 2024-12-11T20:13:44,775 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5be53b084ac7,46085,1733948024383 2024-12-11T20:13:44,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:13:44,784 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:13:44,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:13:44,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:13:44,787 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5be53b084ac7,46085,1733948024383 2024-12-11T20:13:44,793 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T20:13:44,793 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:44,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:44,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T20:13:44,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:44,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T20:13:44,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:44,795 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T20:13:44,796 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5be53b084ac7,46085,1733948024383 from backup master directory 2024-12-11T20:13:44,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:13:44,801 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:13:44,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:13:44,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5be53b084ac7,46085,1733948024383 2024-12-11T20:13:44,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:13:44,801 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
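Editor's note: the ZKWatcher DEBUG entries above show watches firing as the backup-master znode is created and deleted during master startup. The sketch below is a hedged illustration using the plain Apache ZooKeeper client, not HBase's internal ZKWatcher/ZKUtil; the connect string is a placeholder rather than the ephemeral port used by this run, and it assumes a reachable ZooKeeper server whose /hbase parent znodes already exist.

```java
// Hedged illustration with the stock ZooKeeper client: registering watches
// that produce NodeCreated / NodeChildrenChanged events like the DEBUG lines
// above. Placeholder connect string; assumes /hbase and /hbase/backup-masters exist.
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchExample {
    public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event type=" + event.getType()
                + " state=" + event.getState() + " path=" + event.getPath());

        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);

        // Watch a znode that may not exist yet (cf. "Set watcher on znode that
        // does not yet exist, /hbase/master") ...
        zk.exists("/hbase/master", true);
        // ... and the children of a parent, which fires NodeChildrenChanged
        // when a backup-master znode is added or removed.
        zk.getChildren("/hbase/backup-masters", true);

        Thread.sleep(5_000L);  // keep the session open long enough to observe events
        zk.close();
    }
}
```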
2024-12-11T20:13:44,801 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5be53b084ac7,46085,1733948024383 2024-12-11T20:13:44,813 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/hbase.id] with ID: ad2ac32b-7078-490c-8d86-abc1c963290d 2024-12-11T20:13:44,813 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/.tmp/hbase.id 2024-12-11T20:13:44,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741826_1002 (size=42) 2024-12-11T20:13:44,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741826_1002 (size=42) 2024-12-11T20:13:44,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741826_1002 (size=42) 2024-12-11T20:13:44,835 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/.tmp/hbase.id]:[hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/hbase.id] 2024-12-11T20:13:44,867 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:13:44,867 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-11T20:13:44,870 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
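Editor's note: the two FSUtils entries above describe the cluster ID being written to a temporary file and then moved to its target hbase.id location. Below is a hedged sketch of that write-to-temp-then-rename pattern using the stock Hadoop FileSystem API rather than HBase's FSUtils itself; the local paths are placeholders standing in for the hdfs:// locations in the log.

```java
// Sketch of the temp-then-rename pattern behind the hbase.id lines above,
// using the Hadoop FileSystem API directly (not HBase's FSUtils).
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileExample {
    public static void main(String[] args) throws IOException {
        // A default Configuration resolves to the local file system here; the
        // log performs the same steps against hdfs://localhost:40513.
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("target/hbase-id-example/.tmp/hbase.id");  // placeholder paths
        Path dst = new Path("target/hbase-id-example/hbase.id");

        // 1. Write the cluster ID to a temporary location first ...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            // the id this particular run generated, as reported in the log above
            out.write("ad2ac32b-7078-490c-8d86-abc1c963290d".getBytes(StandardCharsets.UTF_8));
        }
        // 2. ... then move it to its final location, so readers never observe
        //    a partially written id file.
        if (!fs.rename(tmp, dst)) {
            throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
        System.out.println("cluster id file at " + fs.makeQualified(dst));
    }
}
```

The rename step is what makes the operation effectively atomic from a reader's point of view, which is why the master stages the file under .tmp first.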
2024-12-11T20:13:44,952 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:44,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:44,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:44,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:44,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741827_1003 (size=196) 2024-12-11T20:13:44,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741827_1003 (size=196) 2024-12-11T20:13:44,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741827_1003 (size=196) 2024-12-11T20:13:44,970 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:13:44,971 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T20:13:44,972 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T20:13:44,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is 
added to blk_1073741828_1004 (size=1189) 2024-12-11T20:13:44,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741828_1004 (size=1189) 2024-12-11T20:13:44,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741828_1004 (size=1189) 2024-12-11T20:13:44,989 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store 2024-12-11T20:13:45,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741829_1005 (size=34) 2024-12-11T20:13:45,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741829_1005 (size=34) 2024-12-11T20:13:45,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741829_1005 (size=34) 2024-12-11T20:13:45,003 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:13:45,003 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T20:13:45,003 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:13:45,003 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
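The 'master:store' descriptor logged above can be expressed with HBase's public descriptor builders. A sketch covering just the 'info' and 'proc' families with the attributes shown (VERSIONS, IN_MEMORY, BLOCKSIZE, BLOOMFILTER, DATA_BLOCK_ENCODING); the class name is illustrative and this is not the code path the master itself runs:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                    // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
                .build())
            .build();
        System.out.println(td);
      }
    }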
2024-12-11T20:13:45,003 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T20:13:45,003 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:13:45,003 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:13:45,003 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733948025003Disabling compacts and flushes for region at 1733948025003Disabling writes for close at 1733948025003Writing region close event to WAL at 1733948025003Closed at 1733948025003 2024-12-11T20:13:45,004 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/.initializing 2024-12-11T20:13:45,005 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/WALs/5be53b084ac7,46085,1733948024383 2024-12-11T20:13:45,009 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C46085%2C1733948024383, suffix=, logDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/WALs/5be53b084ac7,46085,1733948024383, archiveDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/oldWALs, maxLogs=10 2024-12-11T20:13:45,009 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5be53b084ac7%2C46085%2C1733948024383.1733948025009 2024-12-11T20:13:45,021 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/WALs/5be53b084ac7,46085,1733948024383/5be53b084ac7%2C46085%2C1733948024383.1733948025009 2024-12-11T20:13:45,026 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36249:36249),(127.0.0.1/127.0.0.1:33311:33311),(127.0.0.1/127.0.0.1:38789:38789)] 2024-12-11T20:13:45,027 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T20:13:45,027 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:13:45,028 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:45,028 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:45,030 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:45,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T20:13:45,033 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:45,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:45,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:45,036 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T20:13:45,037 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:45,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:13:45,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:45,041 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T20:13:45,041 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:45,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:13:45,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:45,045 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T20:13:45,045 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:45,046 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:13:45,046 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:45,047 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:45,047 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:45,050 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:45,050 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:45,050 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T20:13:45,052 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:13:45,061 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:13:45,062 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70982875, jitterRate=0.05772726237773895}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T20:13:45,063 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733948025028Initializing all the Stores at 1733948025030 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948025030Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733948025030Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733948025030Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733948025030Cleaning up temporary data from old regions at 1733948025050 (+20 ms)Region opened successfully at 1733948025063 (+13 ms) 2024-12-11T20:13:45,067 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T20:13:45,074 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5552d9ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5be53b084ac7/172.17.0.2:0 2024-12-11T20:13:45,076 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-11T20:13:45,077 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T20:13:45,077 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T20:13:45,077 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T20:13:45,078 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-11T20:13:45,079 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-11T20:13:45,079 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T20:13:45,083 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-11T20:13:45,085 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T20:13:45,099 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-11T20:13:45,100 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T20:13:45,105 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T20:13:45,119 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-11T20:13:45,120 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T20:13:45,131 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T20:13:45,142 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-11T20:13:45,147 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T20:13:45,151 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T20:13:45,159 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T20:13:45,169 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T20:13:45,176 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:45,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:45,179 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:45,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/running 2024-12-11T20:13:45,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:45,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:45,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:45,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:45,186 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5be53b084ac7,46085,1733948024383, sessionid=0x1001690e4890000, setting cluster-up flag (Was=false) 2024-12-11T20:13:45,206 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-11T20:13:45,211 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:45,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:45,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:45,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:45,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T20:13:45,234 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T20:13:45,236 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5be53b084ac7,46085,1733948024383 2024-12-11T20:13:45,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:45,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:45,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:45,251 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:45,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T20:13:45,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T20:13:45,287 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T20:13:45,304 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5be53b084ac7,46085,1733948024383 2024-12-11T20:13:45,321 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-11T20:13:45,327 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-11T20:13:45,327 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-11T20:13:45,327 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
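The BaseLoadBalancer/StochasticLoadBalancer lines above report slop=0.2, maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800 and maxRunningTime=30000. A hedged sketch of how such values are normally supplied through Configuration; the key names are my assumption of the usual hbase-site.xml keys and are not confirmed by this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key names below are assumptions; values mirror the numbers in the log.
        conf.setFloat("hbase.regions.slop", 0.2f);                               // slop=0.2
        conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);   // maxSteps
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L); // ms
        System.out.println("maxSteps = "
            + conf.getLong("hbase.master.balancer.stochastic.maxSteps", -1));
      }
    }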
2024-12-11T20:13:45,328 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5be53b084ac7,46085,1733948024383 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T20:13:45,330 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5be53b084ac7:0, corePoolSize=5, maxPoolSize=5 2024-12-11T20:13:45,331 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5be53b084ac7:0, corePoolSize=5, maxPoolSize=5 2024-12-11T20:13:45,331 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5be53b084ac7:0, corePoolSize=5, maxPoolSize=5 2024-12-11T20:13:45,331 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5be53b084ac7:0, corePoolSize=5, maxPoolSize=5 2024-12-11T20:13:45,331 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5be53b084ac7:0, corePoolSize=10, maxPoolSize=10 2024-12-11T20:13:45,331 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,331 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5be53b084ac7:0, corePoolSize=2, maxPoolSize=2 2024-12-11T20:13:45,331 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,342 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T20:13:45,342 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-11T20:13:45,346 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733948055346 2024-12-11T20:13:45,346 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T20:13:45,346 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T20:13:45,346 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T20:13:45,346 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T20:13:45,347 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T20:13:45,347 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T20:13:45,347 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,348 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:45,348 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T20:13:45,349 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T20:13:45,350 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T20:13:45,350 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T20:13:45,355 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T20:13:45,355 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T20:13:45,356 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.large.0-1733948025355,5,FailOnTimeoutGroup] 2024-12-11T20:13:45,357 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.small.0-1733948025356,5,FailOnTimeoutGroup] 2024-12-11T20:13:45,357 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,357 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T20:13:45,357 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,357 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,365 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.HRegionServer(746): ClusterId : ad2ac32b-7078-490c-8d86-abc1c963290d 2024-12-11T20:13:45,365 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T20:13:45,384 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(746): ClusterId : ad2ac32b-7078-490c-8d86-abc1c963290d 2024-12-11T20:13:45,384 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T20:13:45,385 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(746): ClusterId : ad2ac32b-7078-490c-8d86-abc1c963290d 2024-12-11T20:13:45,385 DEBUG [RS:2;5be53b084ac7:33319 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T20:13:45,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741831_1007 (size=1321) 2024-12-11T20:13:45,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741831_1007 (size=1321) 2024-12-11T20:13:45,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741831_1007 (size=1321) 2024-12-11T20:13:45,396 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-11T20:13:45,397 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532 2024-12-11T20:13:45,404 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T20:13:45,404 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T20:13:45,410 DEBUG [RS:2;5be53b084ac7:33319 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T20:13:45,410 DEBUG [RS:2;5be53b084ac7:33319 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T20:13:45,411 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T20:13:45,412 DEBUG [RS:0;5be53b084ac7:42795 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79bc171e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5be53b084ac7/172.17.0.2:0 2024-12-11T20:13:45,413 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T20:13:45,414 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T20:13:45,428 DEBUG [RS:2;5be53b084ac7:33319 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T20:13:45,428 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T20:13:45,429 DEBUG [RS:2;5be53b084ac7:33319 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d6eed0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5be53b084ac7/172.17.0.2:0 2024-12-11T20:13:45,429 DEBUG [RS:1;5be53b084ac7:42037 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fd24393, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5be53b084ac7/172.17.0.2:0 2024-12-11T20:13:45,429 DEBUG [RS:0;5be53b084ac7:42795 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5be53b084ac7:42795 2024-12-11T20:13:45,429 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T20:13:45,429 INFO [RS:0;5be53b084ac7:42795 {}] 
regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T20:13:45,429 DEBUG [RS:0;5be53b084ac7:42795 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T20:13:45,431 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.HRegionServer(2659): reportForDuty to master=5be53b084ac7,46085,1733948024383 with port=42795, startcode=1733948024548 2024-12-11T20:13:45,431 DEBUG [RS:0;5be53b084ac7:42795 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T20:13:45,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741832_1008 (size=32) 2024-12-11T20:13:45,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741832_1008 (size=32) 2024-12-11T20:13:45,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741832_1008 (size=32) 2024-12-11T20:13:45,451 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37087, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T20:13:45,452 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46085 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5be53b084ac7,42795,1733948024548 2024-12-11T20:13:45,452 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46085 {}] master.ServerManager(517): Registering regionserver=5be53b084ac7,42795,1733948024548 2024-12-11T20:13:45,461 DEBUG [RS:1;5be53b084ac7:42037 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;5be53b084ac7:42037 2024-12-11T20:13:45,462 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T20:13:45,462 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T20:13:45,462 DEBUG [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(832): About to register with Master. 
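At this point the region servers are reporting for duty and the master is registering them. A sketch of how a client could list the live region servers afterwards through the public Admin API; the quorum host and port are taken from the log, everything else is illustrative:

    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListRegionServersSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "53625");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          Map<ServerName, ServerMetrics> servers =
              admin.getClusterMetrics().getLiveServerMetrics();
          // After the registrations above, the three RS:* servers should be listed here.
          servers.forEach((name, metrics) ->
              System.out.println(name + " regions=" + metrics.getRegionMetrics().size()));
        }
      }
    }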
2024-12-11T20:13:45,463 DEBUG [RS:0;5be53b084ac7:42795 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532 2024-12-11T20:13:45,463 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(2659): reportForDuty to master=5be53b084ac7,46085,1733948024383 with port=42037, startcode=1733948024603 2024-12-11T20:13:45,463 DEBUG [RS:0;5be53b084ac7:42795 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40513 2024-12-11T20:13:45,463 DEBUG [RS:0;5be53b084ac7:42795 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T20:13:45,463 DEBUG [RS:1;5be53b084ac7:42037 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T20:13:45,465 DEBUG [RS:2;5be53b084ac7:33319 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;5be53b084ac7:33319 2024-12-11T20:13:45,465 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T20:13:45,465 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T20:13:45,465 DEBUG [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T20:13:45,469 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(2659): reportForDuty to master=5be53b084ac7,46085,1733948024383 with port=33319, startcode=1733948024700 2024-12-11T20:13:45,469 DEBUG [RS:0;5be53b084ac7:42795 {}] zookeeper.ZKUtil(111): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5be53b084ac7,42795,1733948024548 2024-12-11T20:13:45,469 WARN [RS:0;5be53b084ac7:42795 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-11T20:13:45,469 DEBUG [RS:2;5be53b084ac7:33319 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T20:13:45,469 INFO [RS:0;5be53b084ac7:42795 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T20:13:45,469 DEBUG [RS:0;5be53b084ac7:42795 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/WALs/5be53b084ac7,42795,1733948024548 2024-12-11T20:13:45,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T20:13:45,471 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58405, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T20:13:45,472 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46085 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5be53b084ac7,42037,1733948024603 2024-12-11T20:13:45,472 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46085 {}] master.ServerManager(517): Registering regionserver=5be53b084ac7,42037,1733948024603 2024-12-11T20:13:45,472 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39041, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T20:13:45,474 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46085 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5be53b084ac7,33319,1733948024700 2024-12-11T20:13:45,474 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46085 {}] master.ServerManager(517): Registering regionserver=5be53b084ac7,33319,1733948024700 2024-12-11T20:13:45,474 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5be53b084ac7,42795,1733948024548] 2024-12-11T20:13:45,475 DEBUG [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532 2024-12-11T20:13:45,475 DEBUG [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40513 2024-12-11T20:13:45,475 DEBUG [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T20:13:45,484 DEBUG [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532 2024-12-11T20:13:45,484 DEBUG [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40513 2024-12-11T20:13:45,484 DEBUG [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T20:13:45,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T20:13:45,511 INFO [RS:0;5be53b084ac7:42795 {}] 
regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T20:13:45,525 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T20:13:45,527 DEBUG [RS:1;5be53b084ac7:42037 {}] zookeeper.ZKUtil(111): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5be53b084ac7,42037,1733948024603 2024-12-11T20:13:45,527 WARN [RS:1;5be53b084ac7:42037 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T20:13:45,527 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5be53b084ac7,33319,1733948024700] 2024-12-11T20:13:45,527 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5be53b084ac7,42037,1733948024603] 2024-12-11T20:13:45,527 INFO [RS:1;5be53b084ac7:42037 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T20:13:45,527 DEBUG [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/WALs/5be53b084ac7,42037,1733948024603 2024-12-11T20:13:45,529 INFO [RS:0;5be53b084ac7:42795 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T20:13:45,529 INFO [RS:0;5be53b084ac7:42795 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,529 DEBUG [RS:2;5be53b084ac7:33319 {}] zookeeper.ZKUtil(111): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5be53b084ac7,33319,1733948024700 2024-12-11T20:13:45,529 WARN [RS:2;5be53b084ac7:33319 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T20:13:45,529 INFO [RS:2;5be53b084ac7:33319 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T20:13:45,529 DEBUG [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/WALs/5be53b084ac7,33319,1733948024700 2024-12-11T20:13:45,530 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T20:13:45,532 INFO [RS:0;5be53b084ac7:42795 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T20:13:45,532 INFO [RS:0;5be53b084ac7:42795 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
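The MemStoreFlusher and PressureAwareCompactionThroughputController lines above report globalMemStoreLimit=880 M (low mark 836 M) and compaction throughput bounds of 100 MB/s and 50 MB/s. A hedged sketch of the configuration that typically drives these numbers; the key names are my assumption and the values simply mirror the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndCompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Global memstore limit as a fraction of heap; 880 M / 836 M in the log are
        // consistent with a 0.4-of-heap limit and a 0.95-of-limit low mark.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Compaction throughput bounds in bytes/second (100 MB/s and 50 MB/s in the log).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println("memstore fraction = "
            + conf.getFloat("hbase.regionserver.global.memstore.size", -1f));
      }
    }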
2024-12-11T20:13:45,533 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,533 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,533 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,533 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,533 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,536 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5be53b084ac7:0, corePoolSize=2, maxPoolSize=2 2024-12-11T20:13:45,536 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,536 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,536 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,536 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,536 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,536 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,536 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:13:45,537 DEBUG [RS:0;5be53b084ac7:42795 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:13:45,538 INFO [RS:0;5be53b084ac7:42795 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,539 INFO [RS:0;5be53b084ac7:42795 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,539 INFO [RS:0;5be53b084ac7:42795 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,539 INFO [RS:0;5be53b084ac7:42795 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
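Each "Starting executor service" entry above names a fixed-size pool dedicated to one kind of region-server work (open region, close region, log replay, snapshots, and so on). A minimal JDK-only analogue of that sizing pattern, not HBase's own ExecutorService class, might look like this:

    import java.util.Map;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class PerOperationPools {
        public static void main(String[] args) {
            // Pool sizes mirror the corePoolSize/maxPoolSize values logged above.
            Map<String, ExecutorService> pools = Map.of(
                "RS_OPEN_REGION", Executors.newFixedThreadPool(1),
                "RS_LOG_REPLAY_OPS", Executors.newFixedThreadPool(2),
                "RS_SNAPSHOT_OPERATIONS", Executors.newFixedThreadPool(3));
            pools.forEach((name, pool) -> pool.submit(
                () -> System.out.println(name + " work runs on " + Thread.currentThread().getName())));
            pools.values().forEach(ExecutorService::shutdown);
        }
    }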
2024-12-11T20:13:45,539 INFO [RS:0;5be53b084ac7:42795 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,539 INFO [RS:0;5be53b084ac7:42795 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,42795,1733948024548-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T20:13:45,541 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T20:13:45,544 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T20:13:45,547 INFO [RS:2;5be53b084ac7:33319 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T20:13:45,547 INFO [RS:2;5be53b084ac7:33319 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,555 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T20:13:45,561 INFO [RS:2;5be53b084ac7:33319 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T20:13:45,561 INFO [RS:2;5be53b084ac7:33319 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,561 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T20:13:45,561 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,561 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,561 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,561 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,561 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,561 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5be53b084ac7:0, corePoolSize=2, maxPoolSize=2 2024-12-11T20:13:45,562 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,562 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,562 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5be53b084ac7:0, corePoolSize=1, 
maxPoolSize=1 2024-12-11T20:13:45,562 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,562 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,562 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,562 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:13:45,562 DEBUG [RS:2;5be53b084ac7:33319 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:13:45,564 INFO [RS:2;5be53b084ac7:33319 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,564 INFO [RS:2;5be53b084ac7:33319 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,564 INFO [RS:2;5be53b084ac7:33319 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,564 INFO [RS:2;5be53b084ac7:33319 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,564 INFO [RS:2;5be53b084ac7:33319 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,564 INFO [RS:2;5be53b084ac7:33319 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,33319,1733948024700-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T20:13:45,576 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T20:13:45,576 INFO [RS:0;5be53b084ac7:42795 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,42795,1733948024548-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,577 INFO [RS:0;5be53b084ac7:42795 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,577 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.Replication(171): 5be53b084ac7,42795,1733948024548 started 2024-12-11T20:13:45,587 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T20:13:45,591 INFO [RS:1;5be53b084ac7:42037 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T20:13:45,591 INFO [RS:1;5be53b084ac7:42037 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
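The ChoreService entries above all follow the same shape: a named task enabled with a fixed period (CompactionChecker every 1,000 ms, CompactionThroughputTuner every 60,000 ms, MobFileCleanerChore every 86,400 s). A rough JDK sketch of that scheduling pattern, again not the HBase ChoreService API:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) throws Exception {
            ScheduledExecutorService chores = Executors.newScheduledThreadPool(2);
            // Periods copied from the log: 1 s and 60 s.
            chores.scheduleAtFixedRate(() -> System.out.println("compaction check"),
                    0, 1_000, TimeUnit.MILLISECONDS);
            chores.scheduleAtFixedRate(() -> System.out.println("compaction throughput tuning"),
                    0, 60_000, TimeUnit.MILLISECONDS);
            Thread.sleep(5_000);      // let a few runs happen, then stop
            chores.shutdownNow();
        }
    }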
2024-12-11T20:13:45,595 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T20:13:45,601 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T20:13:45,602 INFO [RS:1;5be53b084ac7:42037 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T20:13:45,602 INFO [RS:1;5be53b084ac7:42037 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,602 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,602 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,602 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,602 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,603 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,603 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5be53b084ac7:0, corePoolSize=2, maxPoolSize=2 2024-12-11T20:13:45,603 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,603 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,603 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,603 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,603 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,603 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:13:45,603 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:13:45,603 DEBUG [RS:1;5be53b084ac7:42037 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:13:45,604 INFO [RS:1;5be53b084ac7:42037 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-11T20:13:45,604 INFO [RS:1;5be53b084ac7:42037 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,604 INFO [RS:1;5be53b084ac7:42037 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,604 INFO [RS:1;5be53b084ac7:42037 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,604 INFO [RS:1;5be53b084ac7:42037 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,604 INFO [RS:1;5be53b084ac7:42037 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,42037,1733948024603-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T20:13:45,604 INFO [RS:2;5be53b084ac7:33319 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,33319,1733948024700-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,605 INFO [RS:2;5be53b084ac7:33319 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,605 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.Replication(171): 5be53b084ac7,33319,1733948024700 started 2024-12-11T20:13:45,625 INFO [RS:0;5be53b084ac7:42795 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,625 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.HRegionServer(1482): Serving as 5be53b084ac7,42795,1733948024548, RpcServer on 5be53b084ac7/172.17.0.2:42795, sessionid=0x1001690e4890001 2024-12-11T20:13:45,625 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T20:13:45,625 DEBUG [RS:0;5be53b084ac7:42795 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5be53b084ac7,42795,1733948024548 2024-12-11T20:13:45,625 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,42795,1733948024548' 2024-12-11T20:13:45,625 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T20:13:45,627 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T20:13:45,627 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T20:13:45,627 INFO [RS:1;5be53b084ac7:42037 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,42037,1733948024603-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,627 INFO [RS:1;5be53b084ac7:42037 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T20:13:45,628 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.Replication(171): 5be53b084ac7,42037,1733948024603 started 2024-12-11T20:13:45,631 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T20:13:45,631 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T20:13:45,632 DEBUG [RS:0;5be53b084ac7:42795 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5be53b084ac7,42795,1733948024548 2024-12-11T20:13:45,632 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,42795,1733948024548' 2024-12-11T20:13:45,632 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T20:13:45,633 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T20:13:45,633 DEBUG [RS:0;5be53b084ac7:42795 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T20:13:45,633 INFO [RS:0;5be53b084ac7:42795 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T20:13:45,634 INFO [RS:0;5be53b084ac7:42795 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T20:13:45,638 INFO [RS:2;5be53b084ac7:33319 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,638 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(1482): Serving as 5be53b084ac7,33319,1733948024700, RpcServer on 5be53b084ac7/172.17.0.2:33319, sessionid=0x1001690e4890003 2024-12-11T20:13:45,638 DEBUG [RS:2;5be53b084ac7:33319 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T20:13:45,638 DEBUG [RS:2;5be53b084ac7:33319 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5be53b084ac7,33319,1733948024700 2024-12-11T20:13:45,638 DEBUG [RS:2;5be53b084ac7:33319 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,33319,1733948024700' 2024-12-11T20:13:45,638 DEBUG [RS:2;5be53b084ac7:33319 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T20:13:45,639 DEBUG [RS:2;5be53b084ac7:33319 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T20:13:45,640 DEBUG [RS:2;5be53b084ac7:33319 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T20:13:45,640 DEBUG [RS:2;5be53b084ac7:33319 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T20:13:45,640 DEBUG [RS:2;5be53b084ac7:33319 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5be53b084ac7,33319,1733948024700 2024-12-11T20:13:45,641 DEBUG [RS:2;5be53b084ac7:33319 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,33319,1733948024700' 2024-12-11T20:13:45,641 DEBUG [RS:2;5be53b084ac7:33319 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T20:13:45,641 DEBUG [RS:2;5be53b084ac7:33319 {}] 
procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T20:13:45,647 DEBUG [RS:2;5be53b084ac7:33319 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T20:13:45,647 INFO [RS:2;5be53b084ac7:33319 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T20:13:45,647 INFO [RS:2;5be53b084ac7:33319 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T20:13:45,649 INFO [RS:1;5be53b084ac7:42037 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:45,650 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(1482): Serving as 5be53b084ac7,42037,1733948024603, RpcServer on 5be53b084ac7/172.17.0.2:42037, sessionid=0x1001690e4890002 2024-12-11T20:13:45,650 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T20:13:45,650 DEBUG [RS:1;5be53b084ac7:42037 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5be53b084ac7,42037,1733948024603 2024-12-11T20:13:45,650 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,42037,1733948024603' 2024-12-11T20:13:45,650 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T20:13:45,653 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T20:13:45,653 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T20:13:45,654 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T20:13:45,654 DEBUG [RS:1;5be53b084ac7:42037 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5be53b084ac7,42037,1733948024603 2024-12-11T20:13:45,654 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,42037,1733948024603' 2024-12-11T20:13:45,654 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T20:13:45,659 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T20:13:45,663 DEBUG [RS:1;5be53b084ac7:42037 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T20:13:45,663 INFO [RS:1;5be53b084ac7:42037 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T20:13:45,663 INFO [RS:1;5be53b084ac7:42037 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-11T20:13:45,744 INFO [RS:0;5be53b084ac7:42795 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C42795%2C1733948024548, suffix=, logDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/WALs/5be53b084ac7,42795,1733948024548, archiveDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/oldWALs, maxLogs=32 2024-12-11T20:13:45,745 INFO [RS:0;5be53b084ac7:42795 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5be53b084ac7%2C42795%2C1733948024548.1733948025745 2024-12-11T20:13:45,750 INFO [RS:2;5be53b084ac7:33319 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C33319%2C1733948024700, suffix=, logDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/WALs/5be53b084ac7,33319,1733948024700, archiveDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/oldWALs, maxLogs=32 2024-12-11T20:13:45,752 INFO [RS:2;5be53b084ac7:33319 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5be53b084ac7%2C33319%2C1733948024700.1733948025751 2024-12-11T20:13:45,766 INFO [RS:1;5be53b084ac7:42037 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C42037%2C1733948024603, suffix=, logDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/WALs/5be53b084ac7,42037,1733948024603, archiveDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/oldWALs, maxLogs=32 2024-12-11T20:13:45,767 INFO [RS:1;5be53b084ac7:42037 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5be53b084ac7%2C42037%2C1733948024603.1733948025767 2024-12-11T20:13:45,781 INFO [RS:2;5be53b084ac7:33319 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/WALs/5be53b084ac7,33319,1733948024700/5be53b084ac7%2C33319%2C1733948024700.1733948025751 2024-12-11T20:13:45,788 INFO [RS:0;5be53b084ac7:42795 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/WALs/5be53b084ac7,42795,1733948024548/5be53b084ac7%2C42795%2C1733948024548.1733948025745 2024-12-11T20:13:45,807 DEBUG [RS:2;5be53b084ac7:33319 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33311:33311),(127.0.0.1/127.0.0.1:38789:38789),(127.0.0.1/127.0.0.1:36249:36249)] 2024-12-11T20:13:45,815 DEBUG [RS:0;5be53b084ac7:42795 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33311:33311),(127.0.0.1/127.0.0.1:36249:36249),(127.0.0.1/127.0.0.1:38789:38789)] 2024-12-11T20:13:45,817 INFO [RS:1;5be53b084ac7:42037 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/WALs/5be53b084ac7,42037,1733948024603/5be53b084ac7%2C42037%2C1733948024603.1733948025767 2024-12-11T20:13:45,830 DEBUG [RS:1;5be53b084ac7:42037 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36249:36249),(127.0.0.1/127.0.0.1:33311:33311),(127.0.0.1/127.0.0.1:38789:38789)] 2024-12-11T20:13:45,844 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:13:45,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T20:13:45,848 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T20:13:45,848 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:45,849 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:45,849 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T20:13:45,851 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T20:13:45,851 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:45,851 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:45,851 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T20:13:45,853 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T20:13:45,853 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:45,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:45,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T20:13:45,855 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T20:13:45,855 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:45,856 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:45,856 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T20:13:45,857 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740 2024-12-11T20:13:45,858 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740 2024-12-11T20:13:45,859 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T20:13:45,859 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T20:13:45,860 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T20:13:45,862 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T20:13:45,868 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:13:45,869 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65530396, jitterRate=-0.023521006107330322}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T20:13:45,870 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733948025844Initializing all the Stores at 1733948025845 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948025845Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948025845Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733948025845Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948025845Cleaning up temporary data from old regions at 1733948025860 (+15 ms)Region opened successfully at 1733948025870 (+10 ms) 2024-12-11T20:13:45,870 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T20:13:45,870 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T20:13:45,870 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T20:13:45,870 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T20:13:45,870 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T20:13:45,871 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T20:13:45,871 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733948025870Disabling compacts and flushes for region at 
1733948025870Disabling writes for close at 1733948025870Writing region close event to WAL at 1733948025871 (+1 ms)Closed at 1733948025871 2024-12-11T20:13:45,873 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T20:13:45,873 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-11T20:13:45,873 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T20:13:45,875 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T20:13:45,877 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T20:13:46,027 DEBUG [5be53b084ac7:46085 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-11T20:13:46,028 DEBUG [5be53b084ac7:46085 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:13:46,031 DEBUG [5be53b084ac7:46085 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:13:46,031 DEBUG [5be53b084ac7:46085 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:13:46,031 DEBUG [5be53b084ac7:46085 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:13:46,031 DEBUG [5be53b084ac7:46085 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:13:46,031 DEBUG [5be53b084ac7:46085 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:13:46,031 DEBUG [5be53b084ac7:46085 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:13:46,031 INFO [5be53b084ac7:46085 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:13:46,031 INFO [5be53b084ac7:46085 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:13:46,031 INFO [5be53b084ac7:46085 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:13:46,031 DEBUG [5be53b084ac7:46085 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:13:46,032 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5be53b084ac7,33319,1733948024700 2024-12-11T20:13:46,034 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5be53b084ac7,33319,1733948024700, state=OPENING 2024-12-11T20:13:46,076 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T20:13:46,084 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:46,084 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:46,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:46,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:46,085 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:46,085 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:46,085 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:46,086 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T20:13:46,086 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5be53b084ac7,33319,1733948024700}] 2024-12-11T20:13:46,087 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:46,241 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T20:13:46,243 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51647, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:13:46,258 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-11T20:13:46,259 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T20:13:46,262 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C33319%2C1733948024700.meta, suffix=.meta, logDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/WALs/5be53b084ac7,33319,1733948024700, archiveDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/oldWALs, maxLogs=32 2024-12-11T20:13:46,263 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5be53b084ac7%2C33319%2C1733948024700.meta.1733948026263.meta 2024-12-11T20:13:46,287 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/WALs/5be53b084ac7,33319,1733948024700/5be53b084ac7%2C33319%2C1733948024700.meta.1733948026263.meta 2024-12-11T20:13:46,293 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33311:33311),(127.0.0.1/127.0.0.1:38789:38789),(127.0.0.1/127.0.0.1:36249:36249)] 2024-12-11T20:13:46,311 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T20:13:46,311 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T20:13:46,311 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T20:13:46,312 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-11T20:13:46,312 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T20:13:46,312 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:13:46,312 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-11T20:13:46,312 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-11T20:13:46,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T20:13:46,318 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T20:13:46,318 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:46,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:46,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T20:13:46,321 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T20:13:46,321 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:46,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:46,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T20:13:46,323 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T20:13:46,323 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:46,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:46,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T20:13:46,325 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T20:13:46,325 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:46,326 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:13:46,326 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T20:13:46,327 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740 2024-12-11T20:13:46,329 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740 2024-12-11T20:13:46,330 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T20:13:46,330 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T20:13:46,331 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
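The "New WAL" entries above rolled one WAL per region server (plus the meta WAL) under the WALs directory printed in the configuration lines. A hedged way to inspect that directory afterwards with the standard Hadoop FileSystem client; the namenode address and path are copied from this log and are only meaningful while this mini-cluster run is alive:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListWals {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40513"), new Configuration());
            Path walRoot = new Path("/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/WALs");
            for (FileStatus serverDir : fs.listStatus(walRoot)) {      // one subdirectory per region server
                for (FileStatus wal : fs.listStatus(serverDir.getPath())) {
                    System.out.println(wal.getPath().getName() + " len=" + wal.getLen());
                }
            }
            fs.close();
        }
    }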
2024-12-11T20:13:46,333 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T20:13:46,334 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62960894, jitterRate=-0.06180956959724426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T20:13:46,334 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-11T20:13:46,335 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733948026312Writing region info on filesystem at 1733948026312Initializing all the Stores at 1733948026316 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948026316Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948026316Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733948026316Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733948026316Cleaning up temporary data from old regions at 1733948026330 (+14 ms)Running coprocessor post-open hooks at 1733948026334 (+4 ms)Region opened successfully at 1733948026335 (+1 ms) 2024-12-11T20:13:46,337 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733948026240 2024-12-11T20:13:46,341 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T20:13:46,341 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-11T20:13:46,341 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=5be53b084ac7,33319,1733948024700 2024-12-11T20:13:46,343 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5be53b084ac7,33319,1733948024700, state=OPEN 2024-12-11T20:13:46,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T20:13:46,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T20:13:46,376 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T20:13:46,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T20:13:46,376 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5be53b084ac7,33319,1733948024700 2024-12-11T20:13:46,376 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:46,376 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:46,376 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:46,376 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:13:46,381 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T20:13:46,381 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5be53b084ac7,33319,1733948024700 in 290 msec 2024-12-11T20:13:46,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T20:13:46,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 508 msec 2024-12-11T20:13:46,388 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T20:13:46,388 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-11T20:13:46,389 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:13:46,389 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=5be53b084ac7,33319,1733948024700, seqNum=-1] 2024-12-11T20:13:46,390 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:13:46,392 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46789, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:13:46,402 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0770 sec 2024-12-11T20:13:46,402 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733948026402, completionTime=-1 2024-12-11T20:13:46,402 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-11T20:13:46,402 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-11T20:13:46,405 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-11T20:13:46,406 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733948086405 2024-12-11T20:13:46,406 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733948146406 2024-12-11T20:13:46,406 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-11T20:13:46,406 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,46085,1733948024383-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:46,406 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,46085,1733948024383-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:46,406 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,46085,1733948024383-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:46,406 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5be53b084ac7:46085, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:46,406 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T20:13:46,407 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-11T20:13:46,410 DEBUG [master/5be53b084ac7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T20:13:46,412 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.611sec 2024-12-11T20:13:46,412 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T20:13:46,412 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T20:13:46,412 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T20:13:46,412 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-11T20:13:46,412 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T20:13:46,413 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,46085,1733948024383-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T20:13:46,413 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,46085,1733948024383-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T20:13:46,416 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-11T20:13:46,416 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T20:13:46,416 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,46085,1733948024383-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T20:13:46,469 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41ffef44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:13:46,470 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,46085,-1 for getting cluster id 2024-12-11T20:13:46,470 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:13:46,471 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ad2ac32b-7078-490c-8d86-abc1c963290d' 2024-12-11T20:13:46,471 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:13:46,472 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ad2ac32b-7078-490c-8d86-abc1c963290d" 2024-12-11T20:13:46,472 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24fd2ce4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:13:46,472 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,46085,-1] 2024-12-11T20:13:46,472 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:13:46,473 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:46,474 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39642, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:13:46,475 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cda9796, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:13:46,476 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:13:46,477 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,33319,1733948024700, seqNum=-1] 2024-12-11T20:13:46,478 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:13:46,480 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55008, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:13:46,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5be53b084ac7,46085,1733948024383 2024-12-11T20:13:46,482 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T20:13:46,484 DEBUG 
[RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 5be53b084ac7,46085,1733948024383 2024-12-11T20:13:46,484 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2405d7f0 2024-12-11T20:13:46,484 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T20:13:46,486 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39644, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T20:13:46,487 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46085 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:13:46,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46085 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-11T20:13:46,491 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:13:46,491 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:46,491 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46085 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-11T20:13:46,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46085 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T20:13:46,493 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:13:46,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741837_1013 (size=392) 2024-12-11T20:13:46,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741837_1013 (size=392) 2024-12-11T20:13:46,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741837_1013 (size=392) 2024-12-11T20:13:46,504 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 913a3cf1b4a7cc7c1fdd6643b871acc5, NAME => 'TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532 2024-12-11T20:13:46,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741838_1014 (size=51) 2024-12-11T20:13:46,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741838_1014 (size=51) 2024-12-11T20:13:46,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741838_1014 (size=51) 2024-12-11T20:13:46,522 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:13:46,522 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 913a3cf1b4a7cc7c1fdd6643b871acc5, disabling compactions & flushes 2024-12-11T20:13:46,522 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 2024-12-11T20:13:46,522 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 2024-12-11T20:13:46,522 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. after waiting 0 ms 2024-12-11T20:13:46,522 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 2024-12-11T20:13:46,522 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 2024-12-11T20:13:46,522 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 913a3cf1b4a7cc7c1fdd6643b871acc5: Waiting for close lock at 1733948026522Disabling compacts and flushes for region at 1733948026522Disabling writes for close at 1733948026522Writing region close event to WAL at 1733948026522Closed at 1733948026522 2024-12-11T20:13:46,524 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:13:46,525 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733948026524"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733948026524"}]},"ts":"1733948026524"} 2024-12-11T20:13:46,528 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-11T20:13:46,530 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:13:46,530 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733948026530"}]},"ts":"1733948026530"} 2024-12-11T20:13:46,533 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-11T20:13:46,533 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:13:46,534 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:13:46,534 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:13:46,534 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:13:46,534 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:13:46,534 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:13:46,534 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:13:46,534 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:13:46,534 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:13:46,534 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:13:46,534 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:13:46,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=913a3cf1b4a7cc7c1fdd6643b871acc5, ASSIGN}] 2024-12-11T20:13:46,536 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=913a3cf1b4a7cc7c1fdd6643b871acc5, ASSIGN 2024-12-11T20:13:46,538 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=913a3cf1b4a7cc7c1fdd6643b871acc5, ASSIGN; state=OFFLINE, location=5be53b084ac7,42037,1733948024603; forceNewPlan=false, retain=false 2024-12-11T20:13:46,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46085 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T20:13:46,689 INFO [5be53b084ac7:46085 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-11T20:13:46,689 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=913a3cf1b4a7cc7c1fdd6643b871acc5, regionState=OPENING, regionLocation=5be53b084ac7,42037,1733948024603 2024-12-11T20:13:46,693 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=913a3cf1b4a7cc7c1fdd6643b871acc5, ASSIGN because future has completed 2024-12-11T20:13:46,694 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 913a3cf1b4a7cc7c1fdd6643b871acc5, server=5be53b084ac7,42037,1733948024603}] 2024-12-11T20:13:46,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46085 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T20:13:46,848 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T20:13:46,850 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51721, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:13:46,854 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 2024-12-11T20:13:46,855 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 913a3cf1b4a7cc7c1fdd6643b871acc5, NAME => 'TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5.', STARTKEY => '', ENDKEY => ''} 2024-12-11T20:13:46,855 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 913a3cf1b4a7cc7c1fdd6643b871acc5 2024-12-11T20:13:46,855 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:13:46,855 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 913a3cf1b4a7cc7c1fdd6643b871acc5 2024-12-11T20:13:46,856 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 913a3cf1b4a7cc7c1fdd6643b871acc5 2024-12-11T20:13:46,857 INFO [StoreOpener-913a3cf1b4a7cc7c1fdd6643b871acc5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 913a3cf1b4a7cc7c1fdd6643b871acc5 2024-12-11T20:13:46,859 INFO [StoreOpener-913a3cf1b4a7cc7c1fdd6643b871acc5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 913a3cf1b4a7cc7c1fdd6643b871acc5 columnFamilyName cf 2024-12-11T20:13:46,859 DEBUG [StoreOpener-913a3cf1b4a7cc7c1fdd6643b871acc5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:13:46,860 INFO [StoreOpener-913a3cf1b4a7cc7c1fdd6643b871acc5-1 {}] regionserver.HStore(327): Store=913a3cf1b4a7cc7c1fdd6643b871acc5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:13:46,860 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 913a3cf1b4a7cc7c1fdd6643b871acc5 2024-12-11T20:13:46,861 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/default/TestHBaseWalOnEC/913a3cf1b4a7cc7c1fdd6643b871acc5 2024-12-11T20:13:46,862 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/default/TestHBaseWalOnEC/913a3cf1b4a7cc7c1fdd6643b871acc5 2024-12-11T20:13:46,862 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 913a3cf1b4a7cc7c1fdd6643b871acc5 2024-12-11T20:13:46,862 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 913a3cf1b4a7cc7c1fdd6643b871acc5 2024-12-11T20:13:46,865 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 913a3cf1b4a7cc7c1fdd6643b871acc5 2024-12-11T20:13:46,868 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/default/TestHBaseWalOnEC/913a3cf1b4a7cc7c1fdd6643b871acc5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:13:46,869 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 913a3cf1b4a7cc7c1fdd6643b871acc5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71120316, jitterRate=0.05977529287338257}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:13:46,870 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 913a3cf1b4a7cc7c1fdd6643b871acc5 2024-12-11T20:13:46,871 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 913a3cf1b4a7cc7c1fdd6643b871acc5: Running coprocessor pre-open hook at 1733948026856Writing region info on filesystem at 1733948026856Initializing all the Stores at 1733948026857 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733948026857Cleaning up temporary data from old regions at 1733948026863 (+6 ms)Running coprocessor post-open hooks at 1733948026870 (+7 ms)Region opened successfully at 1733948026871 (+1 ms) 2024-12-11T20:13:46,873 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5., pid=6, masterSystemTime=1733948026848 2024-12-11T20:13:46,876 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 2024-12-11T20:13:46,876 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 2024-12-11T20:13:46,877 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=913a3cf1b4a7cc7c1fdd6643b871acc5, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,42037,1733948024603 2024-12-11T20:13:46,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 913a3cf1b4a7cc7c1fdd6643b871acc5, server=5be53b084ac7,42037,1733948024603 because future has completed 2024-12-11T20:13:46,887 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T20:13:46,888 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 913a3cf1b4a7cc7c1fdd6643b871acc5, server=5be53b084ac7,42037,1733948024603 in 189 msec 2024-12-11T20:13:46,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T20:13:46,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=913a3cf1b4a7cc7c1fdd6643b871acc5, ASSIGN in 353 msec 2024-12-11T20:13:46,894 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:13:46,894 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733948026894"}]},"ts":"1733948026894"} 2024-12-11T20:13:46,897 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-11T20:13:46,900 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:13:46,904 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 413 msec 2024-12-11T20:13:47,122 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-11T20:13:47,123 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-11T20:13:47,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46085 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T20:13:47,124 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T20:13:47,124 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-11T20:13:47,124 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:13:47,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T20:13:47,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-11T20:13:47,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-11T20:13:47,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-11T20:13:47,128 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC 2024-12-11T20:13:47,128 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC Metrics about Tables on a single HBase RegionServer 2024-12-11T20:13:47,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-11T20:13:47,130 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:13:47,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
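The run of entries from the CreateTableProcedure request through "Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed" is the server-side trace of an ordinary table creation. As a hedged sketch (class and variable names are illustrative, not taken from TestHBaseWalOnEC), the client-side call that produces the descriptor logged by HMaster$4(2454), REGION_REPLICATION => '1' with a single 'cf' family, would look roughly like this with the standard HBase client API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Descriptor matching the one logged above: REGION_REPLICATION => '1'
          // and a single 'cf' column family with default settings.
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build());
        }
      }
    }

The createTable call blocks while the master runs pid=4 through its CREATE_TABLE_* states and the ASSIGN subprocedures recorded above.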
2024-12-11T20:13:47,135 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5., hostname=5be53b084ac7,42037,1733948024603, seqNum=2] 2024-12-11T20:13:47,136 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:13:47,139 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40584, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:13:47,144 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46085 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-11T20:13:47,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46085 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-11T20:13:47,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46085 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T20:13:47,148 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-11T20:13:47,150 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T20:13:47,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T20:13:47,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46085 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T20:13:47,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42037 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-11T20:13:47,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 
2024-12-11T20:13:47,308 INFO [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 913a3cf1b4a7cc7c1fdd6643b871acc5 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-11T20:13:47,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/default/TestHBaseWalOnEC/913a3cf1b4a7cc7c1fdd6643b871acc5/.tmp/cf/3ef9c33aa2224efe912f3442199fc308 is 36, key is row/cf:cq/1733948027140/Put/seqid=0 2024-12-11T20:13:47,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741839_1015 (size=4787) 2024-12-11T20:13:47,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741839_1015 (size=4787) 2024-12-11T20:13:47,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741839_1015 (size=4787) 2024-12-11T20:13:47,345 INFO [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/default/TestHBaseWalOnEC/913a3cf1b4a7cc7c1fdd6643b871acc5/.tmp/cf/3ef9c33aa2224efe912f3442199fc308 2024-12-11T20:13:47,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/default/TestHBaseWalOnEC/913a3cf1b4a7cc7c1fdd6643b871acc5/.tmp/cf/3ef9c33aa2224efe912f3442199fc308 as hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/default/TestHBaseWalOnEC/913a3cf1b4a7cc7c1fdd6643b871acc5/cf/3ef9c33aa2224efe912f3442199fc308 2024-12-11T20:13:47,366 INFO [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/default/TestHBaseWalOnEC/913a3cf1b4a7cc7c1fdd6643b871acc5/cf/3ef9c33aa2224efe912f3442199fc308, entries=1, sequenceid=5, filesize=4.7 K 2024-12-11T20:13:47,368 INFO [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 913a3cf1b4a7cc7c1fdd6643b871acc5 in 60ms, sequenceid=5, compaction requested=false 2024-12-11T20:13:47,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 913a3cf1b4a7cc7c1fdd6643b871acc5: 2024-12-11T20:13:47,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 
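The flush entries above (FlushTableProcedure pid=7, FlushRegionProcedure pid=8, and the memstore flush of a single cell at row/cf:cq into a ~4.7 K HFile) correspond to a client writing one cell and then requesting a table flush. A hedged sketch under that assumption; the value bytes are illustrative and only the row/family/qualifier come from the HFileWriterImpl line:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlushSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          // Write the single cell reported above as "row/cf:cq" (dataSize=32 B in the log).
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Ask the master to flush the table; this surfaces in the log as
          // FlushTableProcedure (pid=7) and the memstore flush that follows.
          admin.flush(tn);
        }
      }
    }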
2024-12-11T20:13:47,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-11T20:13:47,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46085 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-11T20:13:47,376 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-11T20:13:47,376 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 220 msec 2024-12-11T20:13:47,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 234 msec 2024-12-11T20:13:47,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46085 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T20:13:47,463 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T20:13:47,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-11T20:13:47,469 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T20:13:47,470 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:13:47,470 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:47,470 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:47,470 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:13:47,470 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-11T20:13:47,470 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1807522530, stopped=false 2024-12-11T20:13:47,470 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5be53b084ac7,46085,1733948024383 2024-12-11T20:13:47,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:47,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:47,493 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T20:13:47,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:47,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:47,493 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:47,493 DEBUG 
[pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:47,493 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T20:13:47,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T20:13:47,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:47,493 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:13:47,493 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:47,493 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5be53b084ac7,42795,1733948024548' ***** 2024-12-11T20:13:47,493 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T20:13:47,493 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5be53b084ac7,42037,1733948024603' ***** 2024-12-11T20:13:47,494 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T20:13:47,494 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5be53b084ac7,33319,1733948024700' ***** 2024-12-11T20:13:47,494 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T20:13:47,494 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T20:13:47,494 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T20:13:47,494 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:47,494 INFO [RS:2;5be53b084ac7:33319 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T20:13:47,494 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T20:13:47,494 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T20:13:47,494 INFO [RS:2;5be53b084ac7:33319 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T20:13:47,495 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(959): stopping server 5be53b084ac7,33319,1733948024700 2024-12-11T20:13:47,495 INFO [RS:1;5be53b084ac7:42037 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-11T20:13:47,495 INFO [RS:2;5be53b084ac7:33319 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T20:13:47,495 INFO [RS:1;5be53b084ac7:42037 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T20:13:47,495 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(3091): Received CLOSE for 913a3cf1b4a7cc7c1fdd6643b871acc5 2024-12-11T20:13:47,496 INFO [RS:2;5be53b084ac7:33319 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;5be53b084ac7:33319. 2024-12-11T20:13:47,495 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T20:13:47,496 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:47,496 DEBUG [RS:2;5be53b084ac7:33319 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:13:47,494 INFO [RS:0;5be53b084ac7:42795 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T20:13:47,495 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:47,496 DEBUG [RS:2;5be53b084ac7:33319 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:47,495 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:13:47,496 INFO [RS:0;5be53b084ac7:42795 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T20:13:47,496 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(959): stopping server 5be53b084ac7,42037,1733948024603 2024-12-11T20:13:47,496 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-11T20:13:47,496 INFO [RS:1;5be53b084ac7:42037 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T20:13:47,496 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T20:13:47,496 INFO [RS:1;5be53b084ac7:42037 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;5be53b084ac7:42037. 2024-12-11T20:13:47,496 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T20:13:47,496 DEBUG [RS:1;5be53b084ac7:42037 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:13:47,496 DEBUG [RS:1;5be53b084ac7:42037 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:47,496 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T20:13:47,497 DEBUG [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(1325): Online Regions={913a3cf1b4a7cc7c1fdd6643b871acc5=TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5.} 2024-12-11T20:13:47,497 DEBUG [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(1351): Waiting on 913a3cf1b4a7cc7c1fdd6643b871acc5 2024-12-11T20:13:47,495 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T20:13:47,496 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-11T20:13:47,497 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 913a3cf1b4a7cc7c1fdd6643b871acc5, disabling compactions & flushes 2024-12-11T20:13:47,497 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.HRegionServer(959): stopping server 5be53b084ac7,42795,1733948024548 2024-12-11T20:13:47,497 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 
2024-12-11T20:13:47,498 INFO [RS:0;5be53b084ac7:42795 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T20:13:47,498 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 2024-12-11T20:13:47,498 INFO [RS:0;5be53b084ac7:42795 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5be53b084ac7:42795. 2024-12-11T20:13:47,498 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. after waiting 0 ms 2024-12-11T20:13:47,498 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 2024-12-11T20:13:47,498 DEBUG [RS:0;5be53b084ac7:42795 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:13:47,498 DEBUG [RS:0;5be53b084ac7:42795 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:47,498 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.HRegionServer(976): stopping server 5be53b084ac7,42795,1733948024548; all regions closed. 
2024-12-11T20:13:47,498 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T20:13:47,498 DEBUG [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-11T20:13:47,498 DEBUG [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T20:13:47,499 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T20:13:47,499 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T20:13:47,499 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T20:13:47,499 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T20:13:47,499 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T20:13:47,500 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-11T20:13:47,506 INFO [regionserver/5be53b084ac7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:13:47,515 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:47,515 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:47,516 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:47,519 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:47,523 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:47,532 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/default/TestHBaseWalOnEC/913a3cf1b4a7cc7c1fdd6643b871acc5/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T20:13:47,532 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/.tmp/info/72be8c8cf4b1424dbe019932429c47a8 is 153, key is TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5./info:regioninfo/1733948026877/Put/seqid=0 2024-12-11T20:13:47,533 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 
2024-12-11T20:13:47,533 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 913a3cf1b4a7cc7c1fdd6643b871acc5: Waiting for close lock at 1733948027497Running coprocessor pre-close hooks at 1733948027497Disabling compacts and flushes for region at 1733948027497Disabling writes for close at 1733948027498 (+1 ms)Writing region close event to WAL at 1733948027505 (+7 ms)Running coprocessor post-close hooks at 1733948027533 (+28 ms)Closed at 1733948027533 2024-12-11T20:13:47,534 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733948026486.913a3cf1b4a7cc7c1fdd6643b871acc5. 2024-12-11T20:13:47,543 INFO [regionserver/5be53b084ac7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:13:47,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741833_1009 (size=93) 2024-12-11T20:13:47,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741833_1009 (size=93) 2024-12-11T20:13:47,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741833_1009 (size=93) 2024-12-11T20:13:47,578 INFO [regionserver/5be53b084ac7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:13:47,594 INFO [regionserver/5be53b084ac7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T20:13:47,594 INFO [regionserver/5be53b084ac7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T20:13:47,607 INFO [regionserver/5be53b084ac7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T20:13:47,607 INFO [regionserver/5be53b084ac7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T20:13:47,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741840_1016 (size=6637) 2024-12-11T20:13:47,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741840_1016 (size=6637) 2024-12-11T20:13:47,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741840_1016 (size=6637) 2024-12-11T20:13:47,621 INFO [regionserver/5be53b084ac7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T20:13:47,621 INFO [regionserver/5be53b084ac7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T20:13:47,697 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(976): stopping server 5be53b084ac7,42037,1733948024603; all regions closed. 
2024-12-11T20:13:47,698 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:47,699 DEBUG [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T20:13:47,699 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:47,702 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:47,707 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:47,707 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:47,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741835_1011 (size=1298) 2024-12-11T20:13:47,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741835_1011 (size=1298) 2024-12-11T20:13:47,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741835_1011 (size=1298) 2024-12-11T20:13:47,899 DEBUG [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T20:13:47,947 DEBUG [RS:0;5be53b084ac7:42795 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/oldWALs 2024-12-11T20:13:47,947 INFO [RS:0;5be53b084ac7:42795 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5be53b084ac7%2C42795%2C1733948024548:(num 1733948025745) 2024-12-11T20:13:47,947 DEBUG [RS:0;5be53b084ac7:42795 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:47,947 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:13:47,947 INFO [RS:0;5be53b084ac7:42795 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T20:13:47,947 INFO [RS:0;5be53b084ac7:42795 {}] hbase.ChoreService(370): Chore service for: regionserver/5be53b084ac7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-11T20:13:47,948 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T20:13:47,948 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T20:13:47,948 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T20:13:47,948 INFO [RS:0;5be53b084ac7:42795 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T20:13:47,948 INFO [RS:0;5be53b084ac7:42795 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42795 2024-12-11T20:13:47,949 INFO [regionserver/5be53b084ac7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T20:13:47,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T20:13:47,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5be53b084ac7,42795,1733948024548 2024-12-11T20:13:47,976 INFO [RS:0;5be53b084ac7:42795 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T20:13:47,984 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5be53b084ac7,42795,1733948024548] 2024-12-11T20:13:47,992 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5be53b084ac7,42795,1733948024548 already deleted, retry=false 2024-12-11T20:13:47,992 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5be53b084ac7,42795,1733948024548 expired; onlineServers=2 2024-12-11T20:13:48,013 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/.tmp/info/72be8c8cf4b1424dbe019932429c47a8 2024-12-11T20:13:48,047 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/.tmp/ns/1f51395227af4aac8942009ae911f797 is 43, key is default/ns:d/1733948026393/Put/seqid=0 2024-12-11T20:13:48,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741841_1017 (size=5153) 2024-12-11T20:13:48,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741841_1017 (size=5153) 2024-12-11T20:13:48,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741841_1017 (size=5153) 2024-12-11T20:13:48,068 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/.tmp/ns/1f51395227af4aac8942009ae911f797 2024-12-11T20:13:48,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:48,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42795-0x1001690e4890001, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:48,084 INFO [RS:0;5be53b084ac7:42795 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T20:13:48,085 INFO [RS:0;5be53b084ac7:42795 {}] regionserver.HRegionServer(1031): Exiting; stopping=5be53b084ac7,42795,1733948024548; zookeeper connection closed. 
2024-12-11T20:13:48,087 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@28994b3e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@28994b3e 2024-12-11T20:13:48,103 DEBUG [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T20:13:48,111 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/.tmp/table/24f84b7332a44894b4ea48135e8f87f5 is 52, key is TestHBaseWalOnEC/table:state/1733948026894/Put/seqid=0 2024-12-11T20:13:48,117 DEBUG [RS:1;5be53b084ac7:42037 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/oldWALs 2024-12-11T20:13:48,117 INFO [RS:1;5be53b084ac7:42037 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5be53b084ac7%2C42037%2C1733948024603:(num 1733948025767) 2024-12-11T20:13:48,117 DEBUG [RS:1;5be53b084ac7:42037 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:48,117 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:13:48,117 INFO [RS:1;5be53b084ac7:42037 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T20:13:48,118 INFO [RS:1;5be53b084ac7:42037 {}] hbase.ChoreService(370): Chore service for: regionserver/5be53b084ac7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T20:13:48,118 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T20:13:48,118 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T20:13:48,118 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T20:13:48,118 INFO [RS:1;5be53b084ac7:42037 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T20:13:48,118 INFO [RS:1;5be53b084ac7:42037 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42037 2024-12-11T20:13:48,118 INFO [regionserver/5be53b084ac7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T20:13:48,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T20:13:48,147 INFO [RS:1;5be53b084ac7:42037 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T20:13:48,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5be53b084ac7,42037,1733948024603 2024-12-11T20:13:48,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741842_1018 (size=5249) 2024-12-11T20:13:48,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741842_1018 (size=5249) 2024-12-11T20:13:48,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741842_1018 (size=5249) 2024-12-11T20:13:48,159 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5be53b084ac7,42037,1733948024603] 2024-12-11T20:13:48,164 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/.tmp/table/24f84b7332a44894b4ea48135e8f87f5 2024-12-11T20:13:48,167 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5be53b084ac7,42037,1733948024603 already deleted, retry=false 2024-12-11T20:13:48,167 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5be53b084ac7,42037,1733948024603 expired; onlineServers=1 2024-12-11T20:13:48,176 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/.tmp/info/72be8c8cf4b1424dbe019932429c47a8 as hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/info/72be8c8cf4b1424dbe019932429c47a8 2024-12-11T20:13:48,194 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/info/72be8c8cf4b1424dbe019932429c47a8, entries=10, sequenceid=11, filesize=6.5 K 2024-12-11T20:13:48,199 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/.tmp/ns/1f51395227af4aac8942009ae911f797 as hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/ns/1f51395227af4aac8942009ae911f797 2024-12-11T20:13:48,217 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/ns/1f51395227af4aac8942009ae911f797, entries=2, sequenceid=11, filesize=5.0 K 2024-12-11T20:13:48,220 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/.tmp/table/24f84b7332a44894b4ea48135e8f87f5 as hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/table/24f84b7332a44894b4ea48135e8f87f5 2024-12-11T20:13:48,233 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/table/24f84b7332a44894b4ea48135e8f87f5, entries=2, sequenceid=11, filesize=5.1 K 2024-12-11T20:13:48,236 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 736ms, sequenceid=11, compaction requested=false 2024-12-11T20:13:48,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:48,260 INFO [RS:1;5be53b084ac7:42037 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T20:13:48,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42037-0x1001690e4890002, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:48,260 INFO [RS:1;5be53b084ac7:42037 {}] regionserver.HRegionServer(1031): Exiting; stopping=5be53b084ac7,42037,1733948024603; zookeeper connection closed. 
2024-12-11T20:13:48,263 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-11T20:13:48,265 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T20:13:48,265 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T20:13:48,265 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733948027499Running coprocessor pre-close hooks at 1733948027499Disabling compacts and flushes for region at 1733948027499Disabling writes for close at 1733948027499Obtaining lock to block concurrent updates at 1733948027500 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733948027500Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733948027500Flushing stores of hbase:meta,,1.1588230740 at 1733948027501 (+1 ms)Flushing 1588230740/info: creating writer at 1733948027502 (+1 ms)Flushing 1588230740/info: appending metadata at 1733948027531 (+29 ms)Flushing 1588230740/info: closing flushed file at 1733948027531Flushing 1588230740/ns: creating writer at 1733948028023 (+492 ms)Flushing 1588230740/ns: appending metadata at 1733948028046 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1733948028046Flushing 1588230740/table: creating writer at 1733948028085 (+39 ms)Flushing 1588230740/table: appending metadata at 1733948028110 (+25 ms)Flushing 1588230740/table: closing flushed file at 1733948028110Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39fbdcc3: reopening flushed file at 1733948028174 (+64 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34cd8afb: reopening flushed file at 1733948028194 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@332d3815: reopening flushed file at 1733948028217 (+23 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 736ms, sequenceid=11, compaction requested=false at 1733948028236 (+19 ms)Writing region close event to WAL at 1733948028242 (+6 ms)Running coprocessor post-close hooks at 1733948028265 (+23 ms)Closed at 1733948028265 2024-12-11T20:13:48,265 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-11T20:13:48,271 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@42fb13d4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@42fb13d4 2024-12-11T20:13:48,303 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(976): stopping server 5be53b084ac7,33319,1733948024700; all regions closed. 
2024-12-11T20:13:48,311 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:48,315 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:48,323 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:48,323 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:48,323 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:48,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741836_1012 (size=2751) 2024-12-11T20:13:48,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741836_1012 (size=2751) 2024-12-11T20:13:48,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741836_1012 (size=2751) 2024-12-11T20:13:48,386 DEBUG [RS:2;5be53b084ac7:33319 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/oldWALs 2024-12-11T20:13:48,386 INFO [RS:2;5be53b084ac7:33319 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5be53b084ac7%2C33319%2C1733948024700.meta:.meta(num 1733948026263) 2024-12-11T20:13:48,393 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:48,399 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:48,399 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:48,403 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:48,407 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:48,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741834_1010 (size=93) 2024-12-11T20:13:48,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741834_1010 (size=93) 2024-12-11T20:13:48,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741834_1010 (size=93) 2024-12-11T20:13:48,430 DEBUG [RS:2;5be53b084ac7:33319 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/oldWALs 2024-12-11T20:13:48,430 INFO [RS:2;5be53b084ac7:33319 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5be53b084ac7%2C33319%2C1733948024700:(num 1733948025751) 2024-12-11T20:13:48,430 DEBUG [RS:2;5be53b084ac7:33319 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:13:48,430 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:13:48,430 INFO [RS:2;5be53b084ac7:33319 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T20:13:48,431 INFO [RS:2;5be53b084ac7:33319 {}] hbase.ChoreService(370): Chore service for: regionserver/5be53b084ac7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-11T20:13:48,431 INFO [RS:2;5be53b084ac7:33319 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T20:13:48,431 INFO [RS:2;5be53b084ac7:33319 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33319 2024-12-11T20:13:48,431 INFO 
[regionserver/5be53b084ac7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T20:13:48,467 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5be53b084ac7,33319,1733948024700 2024-12-11T20:13:48,468 INFO [RS:2;5be53b084ac7:33319 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T20:13:48,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T20:13:48,494 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5be53b084ac7,33319,1733948024700] 2024-12-11T20:13:48,511 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5be53b084ac7,33319,1733948024700 already deleted, retry=false 2024-12-11T20:13:48,511 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5be53b084ac7,33319,1733948024700 expired; onlineServers=0 2024-12-11T20:13:48,511 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5be53b084ac7,46085,1733948024383' ***** 2024-12-11T20:13:48,512 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T20:13:48,512 INFO [M:0;5be53b084ac7:46085 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T20:13:48,512 INFO [M:0;5be53b084ac7:46085 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T20:13:48,512 DEBUG [M:0;5be53b084ac7:46085 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-11T20:13:48,512 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-11T20:13:48,512 DEBUG [M:0;5be53b084ac7:46085 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T20:13:48,512 DEBUG [master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.large.0-1733948025355 {}] cleaner.HFileCleaner(306): Exit Thread[master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.large.0-1733948025355,5,FailOnTimeoutGroup] 2024-12-11T20:13:48,512 DEBUG [master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.small.0-1733948025356 {}] cleaner.HFileCleaner(306): Exit Thread[master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.small.0-1733948025356,5,FailOnTimeoutGroup] 2024-12-11T20:13:48,512 INFO [M:0;5be53b084ac7:46085 {}] hbase.ChoreService(370): Chore service for: master/5be53b084ac7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-11T20:13:48,512 INFO [M:0;5be53b084ac7:46085 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T20:13:48,513 DEBUG [M:0;5be53b084ac7:46085 {}] master.HMaster(1795): Stopping service threads 2024-12-11T20:13:48,513 INFO [M:0;5be53b084ac7:46085 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T20:13:48,513 INFO [M:0;5be53b084ac7:46085 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T20:13:48,513 INFO [M:0;5be53b084ac7:46085 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T20:13:48,513 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. 
terminating. 2024-12-11T20:13:48,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T20:13:48,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:13:48,526 DEBUG [M:0;5be53b084ac7:46085 {}] zookeeper.ZKUtil(347): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-11T20:13:48,526 WARN [M:0;5be53b084ac7:46085 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-11T20:13:48,527 INFO [M:0;5be53b084ac7:46085 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/.lastflushedseqids 2024-12-11T20:13:48,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741843_1019 (size=127) 2024-12-11T20:13:48,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741843_1019 (size=127) 2024-12-11T20:13:48,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741843_1019 (size=127) 2024-12-11T20:13:48,599 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:48,599 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33319-0x1001690e4890003, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:48,599 INFO [RS:2;5be53b084ac7:33319 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T20:13:48,599 INFO [RS:2;5be53b084ac7:33319 {}] regionserver.HRegionServer(1031): Exiting; stopping=5be53b084ac7,33319,1733948024700; zookeeper connection closed. 2024-12-11T20:13:48,600 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2ece0c42 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2ece0c42 2024-12-11T20:13:48,607 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-11T20:13:48,972 INFO [M:0;5be53b084ac7:46085 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-11T20:13:48,973 INFO [M:0;5be53b084ac7:46085 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-11T20:13:48,973 DEBUG [M:0;5be53b084ac7:46085 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T20:13:48,973 INFO [M:0;5be53b084ac7:46085 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-11T20:13:48,973 DEBUG [M:0;5be53b084ac7:46085 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:13:48,973 DEBUG [M:0;5be53b084ac7:46085 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T20:13:48,973 DEBUG [M:0;5be53b084ac7:46085 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:13:48,973 INFO [M:0;5be53b084ac7:46085 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-11T20:13:48,992 DEBUG [M:0;5be53b084ac7:46085 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ff02ba3c8e1f4921ae371d4c636e243f is 82, key is hbase:meta,,1/info:regioninfo/1733948026341/Put/seqid=0 2024-12-11T20:13:49,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741844_1020 (size=5672) 2024-12-11T20:13:49,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741844_1020 (size=5672) 2024-12-11T20:13:49,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741844_1020 (size=5672) 2024-12-11T20:13:49,014 INFO [M:0;5be53b084ac7:46085 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ff02ba3c8e1f4921ae371d4c636e243f 2024-12-11T20:13:49,044 DEBUG [M:0;5be53b084ac7:46085 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb7acf43b12549d0af03297d1a016624 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733948026902/Put/seqid=0 2024-12-11T20:13:49,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741845_1021 (size=6439) 2024-12-11T20:13:49,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741845_1021 (size=6439) 2024-12-11T20:13:49,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741845_1021 (size=6439) 2024-12-11T20:13:49,471 INFO [M:0;5be53b084ac7:46085 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb7acf43b12549d0af03297d1a016624 2024-12-11T20:13:49,504 DEBUG [M:0;5be53b084ac7:46085 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/98a03847822a4b76a841be418c71b00f is 69, key is 
5be53b084ac7,33319,1733948024700/rs:state/1733948025475/Put/seqid=0 2024-12-11T20:13:49,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741846_1022 (size=5294) 2024-12-11T20:13:49,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741846_1022 (size=5294) 2024-12-11T20:13:49,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741846_1022 (size=5294) 2024-12-11T20:13:49,523 INFO [M:0;5be53b084ac7:46085 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/98a03847822a4b76a841be418c71b00f 2024-12-11T20:13:49,533 DEBUG [M:0;5be53b084ac7:46085 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ff02ba3c8e1f4921ae371d4c636e243f as hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ff02ba3c8e1f4921ae371d4c636e243f 2024-12-11T20:13:49,543 INFO [M:0;5be53b084ac7:46085 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ff02ba3c8e1f4921ae371d4c636e243f, entries=8, sequenceid=72, filesize=5.5 K 2024-12-11T20:13:49,545 DEBUG [M:0;5be53b084ac7:46085 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb7acf43b12549d0af03297d1a016624 as hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eb7acf43b12549d0af03297d1a016624 2024-12-11T20:13:49,560 INFO [M:0;5be53b084ac7:46085 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eb7acf43b12549d0af03297d1a016624, entries=8, sequenceid=72, filesize=6.3 K 2024-12-11T20:13:49,562 DEBUG [M:0;5be53b084ac7:46085 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/98a03847822a4b76a841be418c71b00f as hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/98a03847822a4b76a841be418c71b00f 2024-12-11T20:13:49,569 INFO [M:0;5be53b084ac7:46085 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40513/user/jenkins/test-data/c4cda24e-7a10-f29a-6e48-0386002ed532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/98a03847822a4b76a841be418c71b00f, entries=3, sequenceid=72, filesize=5.2 K 2024-12-11T20:13:49,571 INFO [M:0;5be53b084ac7:46085 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 
for 1595e783b53d99cd5eef43b6debb2682 in 598ms, sequenceid=72, compaction requested=false 2024-12-11T20:13:49,579 INFO [M:0;5be53b084ac7:46085 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:13:49,579 DEBUG [M:0;5be53b084ac7:46085 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733948028973Disabling compacts and flushes for region at 1733948028973Disabling writes for close at 1733948028973Obtaining lock to block concurrent updates at 1733948028973Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733948028973Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733948028974 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733948028975 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733948028976 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733948028992 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733948028992Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733948029024 (+32 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733948029043 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733948029043Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733948029481 (+438 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733948029504 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733948029504Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@776acedb: reopening flushed file at 1733948029531 (+27 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e731a9f: reopening flushed file at 1733948029544 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12142947: reopening flushed file at 1733948029561 (+17 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 598ms, sequenceid=72, compaction requested=false at 1733948029571 (+10 ms)Writing region close event to WAL at 1733948029579 (+8 ms)Closed at 1733948029579 2024-12-11T20:13:49,580 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:49,583 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:49,583 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:49,583 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:49,583 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T20:13:49,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34453 is added to blk_1073741830_1006 (size=32674) 2024-12-11T20:13:49,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41547 is added to blk_1073741830_1006 (size=32674) 2024-12-11T20:13:49,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44659 is added to blk_1073741830_1006 (size=32674) 2024-12-11T20:13:49,592 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T20:13:49,592 INFO [M:0;5be53b084ac7:46085 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-11T20:13:49,592 INFO [M:0;5be53b084ac7:46085 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46085 2024-12-11T20:13:49,593 INFO [M:0;5be53b084ac7:46085 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T20:13:49,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:49,742 INFO [M:0;5be53b084ac7:46085 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T20:13:49,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46085-0x1001690e4890000, quorum=127.0.0.1:53625, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:13:49,750 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@151d7336{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T20:13:49,751 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2c66e5d3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T20:13:49,751 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T20:13:49,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29b80c96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T20:13:49,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a8aabef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/hadoop.log.dir/,STOPPED} 2024-12-11T20:13:49,756 WARN [BP-1275173032-172.17.0.2-1733948022061 heartbeating to localhost/127.0.0.1:40513 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T20:13:49,756 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T20:13:49,756 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T20:13:49,756 WARN [BP-1275173032-172.17.0.2-1733948022061 heartbeating to localhost/127.0.0.1:40513 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1275173032-172.17.0.2-1733948022061 (Datanode Uuid 195a5ec9-38f6-410c-b387-ca30d81d46ae) service to localhost/127.0.0.1:40513 2024-12-11T20:13:49,756 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/data/data5/current/BP-1275173032-172.17.0.2-1733948022061 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T20:13:49,756 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/data/data6/current/BP-1275173032-172.17.0.2-1733948022061 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T20:13:49,757 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T20:13:49,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a89ba0b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T20:13:49,762 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41ae5b4a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T20:13:49,762 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T20:13:49,762 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@791ac62b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T20:13:49,762 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c414977{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/hadoop.log.dir/,STOPPED} 2024-12-11T20:13:49,766 WARN [BP-1275173032-172.17.0.2-1733948022061 heartbeating to localhost/127.0.0.1:40513 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T20:13:49,766 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T20:13:49,766 WARN [BP-1275173032-172.17.0.2-1733948022061 heartbeating to localhost/127.0.0.1:40513 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1275173032-172.17.0.2-1733948022061 (Datanode Uuid e741c26c-9e20-4fbb-b36c-31f9c4b7854c) service to localhost/127.0.0.1:40513 2024-12-11T20:13:49,766 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T20:13:49,767 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/data/data3/current/BP-1275173032-172.17.0.2-1733948022061 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T20:13:49,767 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/data/data4/current/BP-1275173032-172.17.0.2-1733948022061 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T20:13:49,768 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T20:13:49,771 WARN [BP-1275173032-172.17.0.2-1733948022061 heartbeating to localhost/127.0.0.1:40513 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1275173032-172.17.0.2-1733948022061 (Datanode Uuid 8779e464-33c2-4161-ad6c-472b8e8b1883) service to localhost/127.0.0.1:40513 2024-12-11T20:13:49,772 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/data/data1/current/BP-1275173032-172.17.0.2-1733948022061 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T20:13:49,773 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/cluster_05bd6f84-ac52-08df-75b7-2ab28b8012bd/data/data2/current/BP-1275173032-172.17.0.2-1733948022061 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T20:13:49,776 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cc92116{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T20:13:49,777 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ac2df30{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T20:13:49,777 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T20:13:49,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4218be64{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T20:13:49,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@2d0b4a63{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/hadoop.log.dir/,STOPPED} 2024-12-11T20:13:49,790 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T20:13:49,799 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64064f3d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T20:13:49,807 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@485e3231{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T20:13:49,807 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T20:13:49,808 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@94f688b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T20:13:49,808 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7288ff9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07e5cb37-a8ad-e6c3-4f33-b29594635d59/hadoop.log.dir/,STOPPED} 2024-12-11T20:13:49,823 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-11T20:13:49,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-11T20:13:49,888 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=145 (was 85) - Thread LEAK? -, OpenFileDescriptor=518 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1037 (was 1047), ProcessCount=11 (was 11), AvailableMemoryMB=6041 (was 4712) - AvailableMemoryMB LEAK? -