2024-12-09 23:46:10,416 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-09 23:46:10,427 main DEBUG Took 0.008869 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 23:46:10,427 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 23:46:10,428 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 23:46:10,428 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 23:46:10,429 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 23:46:10,438 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 23:46:10,454 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,455 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 23:46:10,455 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,455 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 23:46:10,456 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,456 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 23:46:10,457 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,457 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 23:46:10,457 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,458 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 23:46:10,458 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,458 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 23:46:10,459 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,459 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-09 23:46:10,459 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,460 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 23:46:10,460 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,460 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 23:46:10,461 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,461 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 23:46:10,461 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,461 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 23:46:10,462 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,462 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 23:46:10,462 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,462 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 23:46:10,464 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 23:46:10,465 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 23:46:10,466 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 23:46:10,467 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-09 23:46:10,468 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-09 23:46:10,468 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-09 23:46:10,476 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-09 23:46:10,478 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-09 23:46:10,480 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-09 23:46:10,480 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-09 23:46:10,480 main DEBUG createAppenders(={Console}) 2024-12-09 23:46:10,481 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-09 23:46:10,482 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-09 23:46:10,482 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-09 23:46:10,482 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-09 23:46:10,482 main DEBUG OutputStream closed 2024-12-09 23:46:10,483 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-09 23:46:10,483 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-09 23:46:10,483 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-09 23:46:10,542 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-09 23:46:10,543 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-09 23:46:10,544 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-09 23:46:10,545 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-09 23:46:10,545 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-09 23:46:10,546 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-09 23:46:10,546 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-09 23:46:10,546 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-09 23:46:10,546 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-09 23:46:10,547 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-09 23:46:10,547 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-09 23:46:10,547 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-09 23:46:10,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-09 23:46:10,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-09 23:46:10,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-09 23:46:10,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-09 23:46:10,549 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-09 23:46:10,549 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-09 23:46:10,551 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09 23:46:10,551 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-09 23:46:10,552 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-09 23:46:10,552 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-09T23:46:10,565 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-09 23:46:10,568 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-09 23:46:10,568 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-09T23:46:10,777 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6 2024-12-09T23:46:10,799 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f, deleteOnExit=true 2024-12-09T23:46:10,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/test.cache.data in system properties and HBase conf 2024-12-09T23:46:10,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T23:46:10,801 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/hadoop.log.dir in system properties and HBase conf 2024-12-09T23:46:10,801 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T23:46:10,802 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T23:46:10,802 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T23:46:10,896 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-09T23:46:10,998 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T23:46:11,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T23:46:11,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T23:46:11,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T23:46:11,005 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T23:46:11,005 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T23:46:11,005 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T23:46:11,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T23:46:11,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T23:46:11,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T23:46:11,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/nfs.dump.dir in system properties and HBase conf 2024-12-09T23:46:11,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/java.io.tmpdir in system properties and HBase conf 2024-12-09T23:46:11,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T23:46:11,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T23:46:11,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T23:46:11,992 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T23:46:12,053 INFO [Time-limited test {}] log.Log(170): Logging initialized @2202ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T23:46:12,116 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T23:46:12,180 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T23:46:12,198 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T23:46:12,198 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T23:46:12,200 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T23:46:12,211 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T23:46:12,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/hadoop.log.dir/,AVAILABLE} 2024-12-09T23:46:12,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T23:46:12,364 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62d6efd9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/java.io.tmpdir/jetty-localhost-43985-hadoop-hdfs-3_4_1-tests_jar-_-any-12867513145865051692/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T23:46:12,376 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:43985} 2024-12-09T23:46:12,376 INFO [Time-limited test {}] server.Server(415): Started @2526ms 2024-12-09T23:46:12,853 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T23:46:12,861 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T23:46:12,862 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T23:46:12,862 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T23:46:12,863 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T23:46:12,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/hadoop.log.dir/,AVAILABLE} 2024-12-09T23:46:12,864 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T23:46:12,963 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b97a472{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/java.io.tmpdir/jetty-localhost-37215-hadoop-hdfs-3_4_1-tests_jar-_-any-16442779159627008013/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T23:46:12,964 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:37215} 2024-12-09T23:46:12,964 INFO [Time-limited test {}] server.Server(415): Started @3114ms 2024-12-09T23:46:13,007 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T23:46:13,116 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T23:46:13,123 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T23:46:13,127 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T23:46:13,127 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T23:46:13,127 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T23:46:13,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/hadoop.log.dir/,AVAILABLE} 2024-12-09T23:46:13,129 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T23:46:13,229 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@353955e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/java.io.tmpdir/jetty-localhost-37831-hadoop-hdfs-3_4_1-tests_jar-_-any-12707731110362426214/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T23:46:13,230 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:37831} 2024-12-09T23:46:13,230 INFO [Time-limited test {}] server.Server(415): Started @3379ms 2024-12-09T23:46:13,232 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T23:46:13,264 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T23:46:13,269 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T23:46:13,270 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T23:46:13,271 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T23:46:13,271 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T23:46:13,272 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/hadoop.log.dir/,AVAILABLE} 2024-12-09T23:46:13,273 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T23:46:13,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3114ae69{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/java.io.tmpdir/jetty-localhost-45703-hadoop-hdfs-3_4_1-tests_jar-_-any-6294003310400753975/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T23:46:13,369 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:45703} 2024-12-09T23:46:13,369 INFO [Time-limited test {}] server.Server(415): Started @3519ms 2024-12-09T23:46:13,371 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T23:46:14,405 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/data/data1/current/BP-1524071419-172.17.0.2-1733787971502/current, will proceed with Du for space computation calculation, 2024-12-09T23:46:14,405 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/data/data4/current/BP-1524071419-172.17.0.2-1733787971502/current, will proceed with Du for space computation calculation, 2024-12-09T23:46:14,405 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/data/data2/current/BP-1524071419-172.17.0.2-1733787971502/current, will proceed with Du for space computation calculation, 2024-12-09T23:46:14,405 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/data/data3/current/BP-1524071419-172.17.0.2-1733787971502/current, will proceed with Du for space computation calculation, 2024-12-09T23:46:14,441 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T23:46:14,441 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T23:46:14,458 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/data/data5/current/BP-1524071419-172.17.0.2-1733787971502/current, will proceed with Du for space computation calculation, 2024-12-09T23:46:14,458 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/data/data6/current/BP-1524071419-172.17.0.2-1733787971502/current, will proceed with Du for space computation calculation, 2024-12-09T23:46:14,481 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T23:46:14,488 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf78a06d8c29d5f5e with lease ID 0x23c87a9799314d23: Processing first storage report for DS-e8617d22-a05c-419e-a033-3e19bdedd3bb from datanode DatanodeRegistration(127.0.0.1:39539, datanodeUuid=99a14f3e-adcd-4b13-930e-3e9e5189a466, infoPort=45055, infoSecurePort=0, ipcPort=36551, storageInfo=lv=-57;cid=testClusterID;nsid=434234487;c=1733787971502) 2024-12-09T23:46:14,489 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf78a06d8c29d5f5e with lease ID 0x23c87a9799314d23: from storage DS-e8617d22-a05c-419e-a033-3e19bdedd3bb node DatanodeRegistration(127.0.0.1:39539, datanodeUuid=99a14f3e-adcd-4b13-930e-3e9e5189a466, infoPort=45055, infoSecurePort=0, ipcPort=36551, storageInfo=lv=-57;cid=testClusterID;nsid=434234487;c=1733787971502), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T23:46:14,489 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xff51cd2afbb52ec1 with lease ID 0x23c87a9799314d25: Processing first storage report for DS-d8c9e1ca-b4a4-4c8d-853d-dc76b8edfa36 from datanode DatanodeRegistration(127.0.0.1:41073, datanodeUuid=02d055fa-762c-4bf9-b089-11f3c212d05c, infoPort=44409, infoSecurePort=0, ipcPort=38547, storageInfo=lv=-57;cid=testClusterID;nsid=434234487;c=1733787971502) 2024-12-09T23:46:14,489 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xff51cd2afbb52ec1 with lease ID 0x23c87a9799314d25: from storage DS-d8c9e1ca-b4a4-4c8d-853d-dc76b8edfa36 node DatanodeRegistration(127.0.0.1:41073, datanodeUuid=02d055fa-762c-4bf9-b089-11f3c212d05c, infoPort=44409, infoSecurePort=0, ipcPort=38547, storageInfo=lv=-57;cid=testClusterID;nsid=434234487;c=1733787971502), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T23:46:14,490 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x32813491149bf5f3 with lease ID 0x23c87a9799314d24: Processing first storage report for DS-6c5e70a9-295f-4455-be70-31e472f21fe4 from datanode DatanodeRegistration(127.0.0.1:35729, datanodeUuid=6a308b02-07e4-4d41-8410-7ec60f96a6d2, infoPort=36527, infoSecurePort=0, ipcPort=36173, storageInfo=lv=-57;cid=testClusterID;nsid=434234487;c=1733787971502) 2024-12-09T23:46:14,490 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x32813491149bf5f3 with lease ID 0x23c87a9799314d24: from storage DS-6c5e70a9-295f-4455-be70-31e472f21fe4 node DatanodeRegistration(127.0.0.1:35729, datanodeUuid=6a308b02-07e4-4d41-8410-7ec60f96a6d2, infoPort=36527, infoSecurePort=0, ipcPort=36173, storageInfo=lv=-57;cid=testClusterID;nsid=434234487;c=1733787971502), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T23:46:14,490 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf78a06d8c29d5f5e with lease ID 0x23c87a9799314d23: Processing first storage report for DS-83364e5b-5385-409d-af6c-5dabc79449a1 from datanode DatanodeRegistration(127.0.0.1:39539, datanodeUuid=99a14f3e-adcd-4b13-930e-3e9e5189a466, infoPort=45055, infoSecurePort=0, ipcPort=36551, storageInfo=lv=-57;cid=testClusterID;nsid=434234487;c=1733787971502) 2024-12-09T23:46:14,490 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xf78a06d8c29d5f5e with lease ID 0x23c87a9799314d23: from storage DS-83364e5b-5385-409d-af6c-5dabc79449a1 node DatanodeRegistration(127.0.0.1:39539, datanodeUuid=99a14f3e-adcd-4b13-930e-3e9e5189a466, infoPort=45055, infoSecurePort=0, ipcPort=36551, storageInfo=lv=-57;cid=testClusterID;nsid=434234487;c=1733787971502), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T23:46:14,490 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xff51cd2afbb52ec1 with lease ID 0x23c87a9799314d25: Processing first storage report for DS-15e185f8-274b-41b0-82e9-edc2d9ce0d0e from datanode DatanodeRegistration(127.0.0.1:41073, datanodeUuid=02d055fa-762c-4bf9-b089-11f3c212d05c, infoPort=44409, infoSecurePort=0, ipcPort=38547, storageInfo=lv=-57;cid=testClusterID;nsid=434234487;c=1733787971502) 2024-12-09T23:46:14,490 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xff51cd2afbb52ec1 with lease ID 0x23c87a9799314d25: from storage DS-15e185f8-274b-41b0-82e9-edc2d9ce0d0e node DatanodeRegistration(127.0.0.1:41073, datanodeUuid=02d055fa-762c-4bf9-b089-11f3c212d05c, infoPort=44409, infoSecurePort=0, ipcPort=38547, storageInfo=lv=-57;cid=testClusterID;nsid=434234487;c=1733787971502), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T23:46:14,490 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x32813491149bf5f3 with lease ID 0x23c87a9799314d24: Processing first storage report for DS-b98aa086-f27a-4767-b272-ffac4db4f391 from datanode DatanodeRegistration(127.0.0.1:35729, datanodeUuid=6a308b02-07e4-4d41-8410-7ec60f96a6d2, infoPort=36527, infoSecurePort=0, ipcPort=36173, storageInfo=lv=-57;cid=testClusterID;nsid=434234487;c=1733787971502) 2024-12-09T23:46:14,491 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x32813491149bf5f3 with lease ID 0x23c87a9799314d24: from storage DS-b98aa086-f27a-4767-b272-ffac4db4f391 node DatanodeRegistration(127.0.0.1:35729, datanodeUuid=6a308b02-07e4-4d41-8410-7ec60f96a6d2, infoPort=36527, infoSecurePort=0, ipcPort=36173, storageInfo=lv=-57;cid=testClusterID;nsid=434234487;c=1733787971502), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T23:46:14,582 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6 2024-12-09T23:46:14,654 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-09T23:46:14,708 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=156, OpenFileDescriptor=393, MaxFileDescriptor=1048576, SystemLoadAverage=181, ProcessCount=11, AvailableMemoryMB=5436 2024-12-09T23:46:14,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T23:46:14,717 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-09T23:46:14,783 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/zookeeper_0, clientPort=56064, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T23:46:14,793 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56064 2024-12-09T23:46:14,813 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:14,817 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:14,905 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:14,905 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:14,950 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542472797_22 at /127.0.0.1:55034 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:41073:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55034 dst: /127.0.0.1:41073 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:14,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-09T23:46:15,367 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T23:46:15,377 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00 with version=8 2024-12-09T23:46:15,378 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/hbase-staging 2024-12-09T23:46:15,459 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-09T23:46:15,655 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2907d75fbb3e:0 server-side Connection retries=45 2024-12-09T23:46:15,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:15,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:15,667 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T23:46:15,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:15,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T23:46:15,787 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T23:46:15,837 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T23:46:15,845 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T23:46:15,848 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T23:46:15,869 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 20265 (auto-detected) 2024-12-09T23:46:15,870 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-09T23:46:15,887 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46061 2024-12-09T23:46:15,906 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46061 connecting to ZooKeeper ensemble=127.0.0.1:56064 2024-12-09T23:46:15,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:460610x0, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T23:46:15,978 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46061-0x1000d06bb0c0000 connected 2024-12-09T23:46:16,052 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:16,058 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:16,071 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:16,075 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00, hbase.cluster.distributed=false 2024-12-09T23:46:16,095 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T23:46:16,099 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46061 2024-12-09T23:46:16,101 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46061 2024-12-09T23:46:16,101 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46061 2024-12-09T23:46:16,102 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46061 2024-12-09T23:46:16,102 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46061 2024-12-09T23:46:16,188 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2907d75fbb3e:0 server-side Connection retries=45 2024-12-09T23:46:16,189 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:16,190 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:16,190 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T23:46:16,190 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:16,190 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T23:46:16,192 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T23:46:16,194 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T23:46:16,195 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36655 2024-12-09T23:46:16,196 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36655 connecting to ZooKeeper ensemble=127.0.0.1:56064 2024-12-09T23:46:16,197 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:16,200 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:16,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:366550x0, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T23:46:16,217 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36655-0x1000d06bb0c0001 connected 2024-12-09T23:46:16,217 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:16,222 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T23:46:16,230 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T23:46:16,232 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T23:46:16,237 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T23:46:16,238 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36655 2024-12-09T23:46:16,238 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=36655 2024-12-09T23:46:16,239 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36655 2024-12-09T23:46:16,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36655 2024-12-09T23:46:16,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36655 2024-12-09T23:46:16,254 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2907d75fbb3e:0 server-side Connection retries=45 2024-12-09T23:46:16,254 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:16,254 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:16,255 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T23:46:16,255 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:16,255 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T23:46:16,255 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T23:46:16,256 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T23:46:16,257 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46803 2024-12-09T23:46:16,258 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46803 connecting to ZooKeeper ensemble=127.0.0.1:56064 2024-12-09T23:46:16,259 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:16,261 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:16,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:468030x0, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T23:46:16,299 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46803-0x1000d06bb0c0002 connected 2024-12-09T23:46:16,299 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:16,300 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-12-09T23:46:16,301 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T23:46:16,302 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T23:46:16,305 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T23:46:16,306 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46803 2024-12-09T23:46:16,307 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46803 2024-12-09T23:46:16,307 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46803 2024-12-09T23:46:16,308 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46803 2024-12-09T23:46:16,309 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46803 2024-12-09T23:46:16,326 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2907d75fbb3e:0 server-side Connection retries=45 2024-12-09T23:46:16,326 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:16,326 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:16,326 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T23:46:16,327 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:16,327 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T23:46:16,327 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T23:46:16,327 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T23:46:16,328 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39769 2024-12-09T23:46:16,329 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39769 connecting to ZooKeeper ensemble=127.0.0.1:56064 2024-12-09T23:46:16,331 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:16,333 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:16,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:397690x0, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T23:46:16,359 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39769-0x1000d06bb0c0003 connected 2024-12-09T23:46:16,359 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:16,360 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T23:46:16,361 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T23:46:16,363 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T23:46:16,366 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T23:46:16,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39769 2024-12-09T23:46:16,370 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39769 2024-12-09T23:46:16,371 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39769 2024-12-09T23:46:16,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39769 2024-12-09T23:46:16,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39769 2024-12-09T23:46:16,392 DEBUG [M:0;2907d75fbb3e:46061 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2907d75fbb3e:46061 2024-12-09T23:46:16,394 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2907d75fbb3e,46061,1733787975505 2024-12-09T23:46:16,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:16,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:16,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
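The RpcExecutor and RWQueueRpcExecutor lines above reflect the region server's call-queue settings: three handlers per executor and a priority queue split into one write lane and two read lanes, with no separate scan lane. A minimal sketch of the corresponding configuration, assuming the standard HBase property names (the values simply mirror what this mini-cluster logs, not production guidance):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the call-queue settings behind the RpcExecutor lines above.
// Property names are standard HBase keys; values mirror handlerCount=3 and
// the read/write split (readHandlers=2, writeHandlers=1, scanQueues=0) in the log.
public class CallQueueConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.handler.count", 3);           // handlerCount=3 per executor
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f); // split priority.RWQ into read/write lanes
    conf.setFloat("hbase.ipc.server.callqueue.scan.ratio", 0.0f); // 0 keeps scans in the read queues
    System.out.println("handlers=" + conf.getInt("hbase.regionserver.handler.count", 30));
  }
}
```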
2024-12-09T23:46:16,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:16,411 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2907d75fbb3e,46061,1733787975505 2024-12-09T23:46:16,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T23:46:16,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T23:46:16,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:16,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T23:46:16,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:16,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:16,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:16,443 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T23:46:16,444 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2907d75fbb3e,46061,1733787975505 from backup master directory 2024-12-09T23:46:16,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:16,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:16,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
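The ZKUtil(113) entries above ("Set watcher on znode that does not yet exist, /hbase/master") amount to registering an existence watch, which is why the NodeCreated events for /hbase/master arrive once the active master writes that znode. A small stand-alone sketch against the plain ZooKeeper API, using the connect string and path from the log and an arbitrary session timeout:

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

// Stand-alone sketch of an existence watch, the operation behind the
// "Set watcher on znode that does not yet exist" lines above.
// Connect string and path come from the log; the session timeout is arbitrary.
public class ExistenceWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56064", 30_000, (WatchedEvent e) -> { });
    // exists() registers the watch whether or not /hbase/master is present yet;
    // a NodeCreated event is delivered later, once the active master creates the znode.
    zk.exists("/hbase/master", event ->
        System.out.println("event=" + event.getType() + " path=" + event.getPath()));
    Thread.sleep(1_000);
    zk.close();
  }
}
```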
2024-12-09T23:46:16,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2907d75fbb3e,46061,1733787975505 2024-12-09T23:46:16,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:16,457 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T23:46:16,457 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2907d75fbb3e,46061,1733787975505 2024-12-09T23:46:16,459 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T23:46:16,460 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T23:46:16,514 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/hbase.id] with ID: 8f6e3f4a-b4b0-4707-ac61-7c5b224747e3 2024-12-09T23:46:16,515 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/.tmp/hbase.id 2024-12-09T23:46:16,521 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:16,521 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:16,525 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542472797_22 at /127.0.0.1:41700 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:39539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41700 dst: /127.0.0.1:39539 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:16,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-09T23:46:16,532 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T23:46:16,533 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/.tmp/hbase.id]:[hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/hbase.id] 2024-12-09T23:46:16,577 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:16,580 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T23:46:16,596 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-09T23:46:16,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:16,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:16,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:16,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:16,638 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:16,638 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:16,641 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542472797_22 at /127.0.0.1:55060 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:41073:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55060 dst: /127.0.0.1:41073 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:16,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-09T23:46:16,646 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
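The DFSStripedOutputStream warnings above arise because the RS-3-2-1024k policy needs five datanodes (three data blocks plus two parity blocks) while this mini-cluster runs only three, so the parity blocks at indexes 3 and 4 cannot be placed. Besides the 'hdfs ec -verifyClusterSetup' command the warning itself suggests, the effective policy on a directory can be inspected (or dropped back to plain replication) through the HDFS client API; a hedged sketch, with the NameNode URI and test directory taken from the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Inspect the erasure coding policy in effect for the test data directory.
// RS-3-2-1024k wants 5 datanodes; with 3, parity placement fails as warned above.
public class EcPolicyCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("hdfs://localhost:37867/").getFileSystem(conf);
    Path dir = new Path("/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00");
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);  // null means plain replication
    System.out.println("effective EC policy: " + (policy == null ? "replication" : policy.getName()));
    // Falling back to replication for the directory would silence these warnings:
    // dfs.unsetErasureCodingPolicy(dir);
    dfs.close();
  }
}
```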
2024-12-09T23:46:16,660 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T23:46:16,662 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T23:46:16,667 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T23:46:16,690 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:16,690 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:16,693 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542472797_22 at /127.0.0.1:55064 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:41073:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55064 dst: /127.0.0.1:41073 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:16,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-09T23:46:16,698 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T23:46:16,712 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store 2024-12-09T23:46:16,726 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:16,726 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:16,732 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542472797_22 at /127.0.0.1:41734 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41734 dst: /127.0.0.1:39539 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:16,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-09T23:46:16,737 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T23:46:16,741 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-09T23:46:16,744 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T23:46:16,745 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T23:46:16,745 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T23:46:16,746 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T23:46:16,747 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-09T23:46:16,747 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T23:46:16,747 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T23:46:16,748 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733787976745Disabling compacts and flushes for region at 1733787976745Disabling writes for close at 1733787976747 (+2 ms)Writing region close event to WAL at 1733787976747Closed at 1733787976747 2024-12-09T23:46:16,751 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/.initializing 2024-12-09T23:46:16,751 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/WALs/2907d75fbb3e,46061,1733787975505 2024-12-09T23:46:16,759 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T23:46:16,773 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2907d75fbb3e%2C46061%2C1733787975505, suffix=, logDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/WALs/2907d75fbb3e,46061,1733787975505, archiveDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/oldWALs, maxLogs=10 2024-12-09T23:46:16,809 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/WALs/2907d75fbb3e,46061,1733787975505/2907d75fbb3e%2C46061%2C1733787975505.1733787976778, exclude list is [], retry=0 2024-12-09T23:46:16,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:16,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41073,DS-d8c9e1ca-b4a4-4c8d-853d-dc76b8edfa36,DISK] 2024-12-09T23:46:16,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35729,DS-6c5e70a9-295f-4455-be70-31e472f21fe4,DISK] 2024-12-09T23:46:16,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39539,DS-e8617d22-a05c-419e-a033-3e19bdedd3bb,DISK] 2024-12-09T23:46:16,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-09T23:46:16,867 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/WALs/2907d75fbb3e,46061,1733787975505/2907d75fbb3e%2C46061%2C1733787975505.1733787976778 2024-12-09T23:46:16,868 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36527:36527),(127.0.0.1/127.0.0.1:44409:44409),(127.0.0.1/127.0.0.1:45055:45055)] 2024-12-09T23:46:16,868 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T23:46:16,869 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T23:46:16,871 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:16,872 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:16,906 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:16,927 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T23:46:16,930 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:16,932 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:16,932 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:16,936 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T23:46:16,936 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:16,937 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T23:46:16,937 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:16,940 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T23:46:16,940 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:16,941 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T23:46:16,941 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:16,943 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T23:46:16,943 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:16,944 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T23:46:16,944 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:16,948 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:16,949 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:16,954 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:16,954 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:16,958 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T23:46:16,961 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:16,968 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T23:46:16,969 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61810678, jitterRate=-0.07894912362098694}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T23:46:16,974 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733787976884Initializing all the Stores at 1733787976886 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787976886Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733787976887 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733787976887Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733787976887Cleaning up temporary data from old regions at 1733787976955 (+68 ms)Region opened successfully at 1733787976974 (+19 ms) 2024-12-09T23:46:16,975 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T23:46:17,004 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@637b573a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2907d75fbb3e/172.17.0.2:0 2024-12-09T23:46:17,028 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T23:46:17,037 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T23:46:17,037 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T23:46:17,039 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T23:46:17,041 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T23:46:17,046 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-09T23:46:17,046 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T23:46:17,068 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T23:46:17,076 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T23:46:17,108 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T23:46:17,110 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T23:46:17,112 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T23:46:17,122 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T23:46:17,125 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T23:46:17,130 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T23:46:17,141 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T23:46:17,142 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T23:46:17,149 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T23:46:17,169 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T23:46:17,174 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T23:46:17,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:17,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:17,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:17,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:17,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:17,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:17,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:17,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:17,187 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2907d75fbb3e,46061,1733787975505, sessionid=0x1000d06bb0c0000, setting cluster-up flag (Was=false) 2024-12-09T23:46:17,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:17,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:17,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
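The "Unable to get data of znode ... because node does not exist (not necessarily an error)" entries above follow a probe-and-tolerate pattern: read the znode and treat its absence as a normal answer rather than a failure. A minimal sketch with the raw ZooKeeper client, reusing the connect string and znode paths from those entries:

```java
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

// Probe optional configuration znodes and treat absence as a normal result,
// mirroring the "not necessarily an error" entries above.
public class ZNodeProbeSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56064", 30_000, event -> { });
    for (String path : new String[] {"/hbase/balancer", "/hbase/normalizer", "/hbase/switch/split"}) {
      try {
        byte[] data = zk.getData(path, false, null);
        System.out.println(path + " = " + data.length + " bytes");
      } catch (KeeperException.NoNodeException e) {
        System.out.println(path + " does not exist (not necessarily an error)");
      }
    }
    zk.close();
  }
}
```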
2024-12-09T23:46:17,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:17,241 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T23:46:17,246 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2907d75fbb3e,46061,1733787975505 2024-12-09T23:46:17,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:17,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:17,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:17,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:17,291 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T23:46:17,294 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2907d75fbb3e,46061,1733787975505 2024-12-09T23:46:17,304 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T23:46:17,370 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T23:46:17,378 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.HRegionServer(746): ClusterId : 8f6e3f4a-b4b0-4707-ac61-7c5b224747e3 2024-12-09T23:46:17,378 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.HRegionServer(746): ClusterId : 8f6e3f4a-b4b0-4707-ac61-7c5b224747e3 2024-12-09T23:46:17,378 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(746): ClusterId : 8f6e3f4a-b4b0-4707-ac61-7c5b224747e3 2024-12-09T23:46:17,379 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T23:46:17,380 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T23:46:17,380 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T23:46:17,380 DEBUG [RS:2;2907d75fbb3e:39769 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T23:46:17,386 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T23:46:17,391 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2907d75fbb3e,46061,1733787975505 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T23:46:17,401 DEBUG [RS:2;2907d75fbb3e:39769 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T23:46:17,401 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T23:46:17,401 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T23:46:17,401 DEBUG [RS:2;2907d75fbb3e:39769 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T23:46:17,401 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T23:46:17,401 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T23:46:17,402 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2907d75fbb3e:0, corePoolSize=5, maxPoolSize=5 2024-12-09T23:46:17,402 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2907d75fbb3e:0, corePoolSize=5, maxPoolSize=5 2024-12-09T23:46:17,402 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2907d75fbb3e:0, corePoolSize=5, maxPoolSize=5 2024-12-09T23:46:17,402 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2907d75fbb3e:0, corePoolSize=5, maxPoolSize=5 2024-12-09T23:46:17,403 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2907d75fbb3e:0, corePoolSize=10, maxPoolSize=10 2024-12-09T23:46:17,403 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,403 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2907d75fbb3e:0, corePoolSize=2, maxPoolSize=2 
2024-12-09T23:46:17,403 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,408 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733788007407 2024-12-09T23:46:17,409 DEBUG [RS:2;2907d75fbb3e:39769 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T23:46:17,409 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T23:46:17,409 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T23:46:17,410 DEBUG [RS:0;2907d75fbb3e:36655 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fa2491d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2907d75fbb3e/172.17.0.2:0 2024-12-09T23:46:17,410 DEBUG [RS:1;2907d75fbb3e:46803 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@296243c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2907d75fbb3e/172.17.0.2:0 2024-12-09T23:46:17,410 DEBUG [RS:2;2907d75fbb3e:39769 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bf1c92f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2907d75fbb3e/172.17.0.2:0 2024-12-09T23:46:17,410 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T23:46:17,411 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T23:46:17,411 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T23:46:17,412 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T23:46:17,415 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T23:46:17,415 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T23:46:17,416 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T23:46:17,416 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T23:46:17,423 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T23:46:17,427 DEBUG [RS:0;2907d75fbb3e:36655 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2907d75fbb3e:36655 2024-12-09T23:46:17,428 DEBUG [RS:1;2907d75fbb3e:46803 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2907d75fbb3e:46803 2024-12-09T23:46:17,428 DEBUG [RS:2;2907d75fbb3e:39769 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;2907d75fbb3e:39769 2024-12-09T23:46:17,430 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T23:46:17,431 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:17,431 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T23:46:17,431 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T23:46:17,431 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T23:46:17,431 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T23:46:17,431 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T23:46:17,431 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T23:46:17,431 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T23:46:17,431 DEBUG [RS:2;2907d75fbb3e:39769 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T23:46:17,431 DEBUG [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T23:46:17,431 DEBUG [RS:0;2907d75fbb3e:36655 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T23:46:17,431 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T23:46:17,431 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T23:46:17,434 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(2659): reportForDuty to master=2907d75fbb3e,46061,1733787975505 with port=46803, startcode=1733787976254 2024-12-09T23:46:17,434 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.HRegionServer(2659): reportForDuty to master=2907d75fbb3e,46061,1733787975505 with port=39769, startcode=1733787976325 2024-12-09T23:46:17,434 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.HRegionServer(2659): reportForDuty to master=2907d75fbb3e,46061,1733787975505 with port=36655, startcode=1733787976159 2024-12-09T23:46:17,439 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T23:46:17,439 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T23:46:17,441 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2907d75fbb3e:0:becomeActiveMaster-HFileCleaner.large.0-1733787977440,5,FailOnTimeoutGroup] 2024-12-09T23:46:17,442 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2907d75fbb3e:0:becomeActiveMaster-HFileCleaner.small.0-1733787977442,5,FailOnTimeoutGroup] 2024-12-09T23:46:17,442 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,443 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T23:46:17,444 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,444 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:17,444 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,444 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:17,447 DEBUG [RS:2;2907d75fbb3e:39769 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T23:46:17,447 DEBUG [RS:0;2907d75fbb3e:36655 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T23:46:17,447 DEBUG [RS:1;2907d75fbb3e:46803 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T23:46:17,452 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542472797_22 at /127.0.0.1:60184 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60184 dst: /127.0.0.1:35729 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T23:46:17,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-09T23:46:17,459 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T23:46:17,460 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T23:46:17,461 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00 2024-12-09T23:46:17,480 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:17,480 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T23:46:17,494 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36115, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T23:46:17,494 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60429, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T23:46:17,494 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33753, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T23:46:17,495 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542472797_22 at /127.0.0.1:60202 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60202 dst: /127.0.0.1:35729 java.io.InterruptedIOException: Interrupted receiveBlock at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:17,501 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46061 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2907d75fbb3e,36655,1733787976159 2024-12-09T23:46:17,504 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46061 {}] master.ServerManager(517): Registering regionserver=2907d75fbb3e,36655,1733787976159 2024-12-09T23:46:17,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-09T23:46:17,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-09T23:46:17,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-09T23:46:17,525 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T23:46:17,526 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T23:46:17,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T23:46:17,532 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T23:46:17,532 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:17,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:17,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T23:46:17,536 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46061 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2907d75fbb3e,39769,1733787976325 2024-12-09T23:46:17,536 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T23:46:17,536 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46061 {}] master.ServerManager(517): Registering regionserver=2907d75fbb3e,39769,1733787976325 2024-12-09T23:46:17,536 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:17,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:17,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T23:46:17,540 DEBUG [RS:0;2907d75fbb3e:36655 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00 2024-12-09T23:46:17,540 DEBUG [RS:0;2907d75fbb3e:36655 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37867 2024-12-09T23:46:17,540 DEBUG [RS:0;2907d75fbb3e:36655 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T23:46:17,542 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T23:46:17,542 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:17,543 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:17,544 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46061 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2907d75fbb3e,46803,1733787976254 2024-12-09T23:46:17,544 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46061 {}] master.ServerManager(517): Registering regionserver=2907d75fbb3e,46803,1733787976254 2024-12-09T23:46:17,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T23:46:17,544 DEBUG [RS:2;2907d75fbb3e:39769 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00 2024-12-09T23:46:17,544 DEBUG [RS:2;2907d75fbb3e:39769 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37867 2024-12-09T23:46:17,544 DEBUG [RS:2;2907d75fbb3e:39769 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T23:46:17,547 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T23:46:17,547 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:17,547 DEBUG [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00 2024-12-09T23:46:17,547 DEBUG [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37867 2024-12-09T23:46:17,547 DEBUG [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T23:46:17,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:17,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T23:46:17,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740 2024-12-09T23:46:17,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740 2024-12-09T23:46:17,553 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T23:46:17,553 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T23:46:17,554 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-09T23:46:17,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T23:46:17,562 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T23:46:17,563 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73018965, jitterRate=0.08806736767292023}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T23:46:17,565 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733787977526Initializing all the Stores at 1733787977529 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787977529Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787977529Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733787977529Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787977529Cleaning up temporary data from old regions at 1733787977553 (+24 ms)Region opened successfully at 1733787977565 (+12 ms) 2024-12-09T23:46:17,565 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T23:46:17,566 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T23:46:17,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T23:46:17,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T23:46:17,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T23:46:17,567 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T23:46:17,567 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733787977565Disabling compacts and flushes for region at 1733787977565Disabling writes for close at 1733787977566 (+1 
ms)Writing region close event to WAL at 1733787977566Closed at 1733787977567 (+1 ms) 2024-12-09T23:46:17,570 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T23:46:17,570 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T23:46:17,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T23:46:17,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T23:46:17,583 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T23:46:17,586 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T23:46:17,603 DEBUG [RS:0;2907d75fbb3e:36655 {}] zookeeper.ZKUtil(111): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2907d75fbb3e,36655,1733787976159 2024-12-09T23:46:17,604 WARN [RS:0;2907d75fbb3e:36655 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T23:46:17,604 INFO [RS:0;2907d75fbb3e:36655 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T23:46:17,604 DEBUG [RS:0;2907d75fbb3e:36655 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,36655,1733787976159 2024-12-09T23:46:17,604 DEBUG [RS:1;2907d75fbb3e:46803 {}] zookeeper.ZKUtil(111): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2907d75fbb3e,46803,1733787976254 2024-12-09T23:46:17,604 DEBUG [RS:2;2907d75fbb3e:39769 {}] zookeeper.ZKUtil(111): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2907d75fbb3e,39769,1733787976325 2024-12-09T23:46:17,604 WARN [RS:1;2907d75fbb3e:46803 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T23:46:17,604 WARN [RS:2;2907d75fbb3e:39769 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T23:46:17,604 INFO [RS:1;2907d75fbb3e:46803 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T23:46:17,604 INFO [RS:2;2907d75fbb3e:39769 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T23:46:17,605 DEBUG [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,46803,1733787976254 2024-12-09T23:46:17,605 DEBUG [RS:2;2907d75fbb3e:39769 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,39769,1733787976325 2024-12-09T23:46:17,606 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2907d75fbb3e,36655,1733787976159] 2024-12-09T23:46:17,606 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2907d75fbb3e,46803,1733787976254] 2024-12-09T23:46:17,606 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2907d75fbb3e,39769,1733787976325] 2024-12-09T23:46:17,629 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T23:46:17,629 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T23:46:17,629 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T23:46:17,644 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T23:46:17,644 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T23:46:17,647 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T23:46:17,650 INFO [RS:0;2907d75fbb3e:36655 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T23:46:17,650 INFO [RS:2;2907d75fbb3e:39769 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T23:46:17,650 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,650 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T23:46:17,651 INFO [RS:1;2907d75fbb3e:46803 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T23:46:17,651 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,651 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T23:46:17,652 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T23:46:17,652 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T23:46:17,660 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T23:46:17,662 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,662 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,662 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,662 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T23:46:17,662 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,662 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,662 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T23:46:17,662 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,662 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2907d75fbb3e:0, corePoolSize=2, maxPoolSize=2 2024-12-09T23:46:17,662 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,663 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,663 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,663 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,663 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,663 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,663 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,663 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,663 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,663 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,663 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2907d75fbb3e:0, corePoolSize=2, maxPoolSize=2 2024-12-09T23:46:17,663 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,663 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,663 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2907d75fbb3e:0, corePoolSize=3, maxPoolSize=3 2024-12-09T23:46:17,663 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,663 DEBUG [RS:0;2907d75fbb3e:36655 {}] executor.ExecutorService(95): Starting executor service 
name=RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0, corePoolSize=3, maxPoolSize=3 2024-12-09T23:46:17,663 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,664 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,664 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,664 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,664 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2907d75fbb3e:0, corePoolSize=3, maxPoolSize=3 2024-12-09T23:46:17,664 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T23:46:17,664 DEBUG [RS:2;2907d75fbb3e:39769 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0, corePoolSize=3, maxPoolSize=3 2024-12-09T23:46:17,664 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,664 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,664 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,665 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,665 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,665 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,665 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2907d75fbb3e:0, corePoolSize=2, maxPoolSize=2 2024-12-09T23:46:17,665 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,665 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,665 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,665 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,665 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,665 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:17,665 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2907d75fbb3e:0, corePoolSize=3, maxPoolSize=3 2024-12-09T23:46:17,666 DEBUG [RS:1;2907d75fbb3e:46803 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0, corePoolSize=3, maxPoolSize=3 2024-12-09T23:46:17,680 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,680 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,680 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,680 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,680 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,680 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,680 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,680 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,680 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,680 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,680 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,680 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,39769,1733787976325-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T23:46:17,682 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,682 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,682 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-09T23:46:17,682 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,683 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,46803,1733787976254-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T23:46:17,683 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,36655,1733787976159-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T23:46:17,711 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T23:46:17,714 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,39769,1733787976325-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,714 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,714 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.Replication(171): 2907d75fbb3e,39769,1733787976325 started 2024-12-09T23:46:17,717 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T23:46:17,717 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,46803,1733787976254-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,717 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,717 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.Replication(171): 2907d75fbb3e,46803,1733787976254 started 2024-12-09T23:46:17,717 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T23:46:17,718 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,36655,1733787976159-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,718 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,718 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.Replication(171): 2907d75fbb3e,36655,1733787976159 started 2024-12-09T23:46:17,736 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,736 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.HRegionServer(1482): Serving as 2907d75fbb3e,39769,1733787976325, RpcServer on 2907d75fbb3e/172.17.0.2:39769, sessionid=0x1000d06bb0c0003 2024-12-09T23:46:17,737 DEBUG [RS:2;2907d75fbb3e:39769 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T23:46:17,737 WARN [2907d75fbb3e:46061 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-09T23:46:17,738 DEBUG [RS:2;2907d75fbb3e:39769 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2907d75fbb3e,39769,1733787976325 2024-12-09T23:46:17,738 DEBUG [RS:2;2907d75fbb3e:39769 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2907d75fbb3e,39769,1733787976325' 2024-12-09T23:46:17,738 DEBUG [RS:2;2907d75fbb3e:39769 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T23:46:17,739 DEBUG [RS:2;2907d75fbb3e:39769 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T23:46:17,739 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:17,740 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.HRegionServer(1482): Serving as 2907d75fbb3e,36655,1733787976159, RpcServer on 2907d75fbb3e/172.17.0.2:36655, sessionid=0x1000d06bb0c0001 2024-12-09T23:46:17,740 DEBUG [RS:2;2907d75fbb3e:39769 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T23:46:17,740 DEBUG [RS:2;2907d75fbb3e:39769 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T23:46:17,740 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T23:46:17,740 DEBUG [RS:0;2907d75fbb3e:36655 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2907d75fbb3e,36655,1733787976159 2024-12-09T23:46:17,740 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2907d75fbb3e,36655,1733787976159' 2024-12-09T23:46:17,740 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T23:46:17,740 DEBUG [RS:2;2907d75fbb3e:39769 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2907d75fbb3e,39769,1733787976325 2024-12-09T23:46:17,740 DEBUG [RS:2;2907d75fbb3e:39769 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2907d75fbb3e,39769,1733787976325' 2024-12-09T23:46:17,740 DEBUG [RS:2;2907d75fbb3e:39769 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T23:46:17,741 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T23:46:17,741 DEBUG [RS:2;2907d75fbb3e:39769 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T23:46:17,741 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T23:46:17,741 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(1482): Serving as 2907d75fbb3e,46803,1733787976254, RpcServer on 2907d75fbb3e/172.17.0.2:46803, sessionid=0x1000d06bb0c0002 2024-12-09T23:46:17,741 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T23:46:17,741 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T23:46:17,741 DEBUG [RS:2;2907d75fbb3e:39769 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T23:46:17,741 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T23:46:17,741 DEBUG [RS:1;2907d75fbb3e:46803 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2907d75fbb3e,46803,1733787976254 2024-12-09T23:46:17,742 DEBUG [RS:0;2907d75fbb3e:36655 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2907d75fbb3e,36655,1733787976159 2024-12-09T23:46:17,742 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2907d75fbb3e,46803,1733787976254' 2024-12-09T23:46:17,742 INFO [RS:2;2907d75fbb3e:39769 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T23:46:17,742 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2907d75fbb3e,36655,1733787976159' 2024-12-09T23:46:17,742 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T23:46:17,742 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T23:46:17,742 INFO [RS:2;2907d75fbb3e:39769 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T23:46:17,742 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T23:46:17,742 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T23:46:17,743 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T23:46:17,743 DEBUG [RS:0;2907d75fbb3e:36655 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T23:46:17,743 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T23:46:17,743 INFO [RS:0;2907d75fbb3e:36655 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T23:46:17,743 DEBUG [RS:1;2907d75fbb3e:46803 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2907d75fbb3e,46803,1733787976254 2024-12-09T23:46:17,743 INFO [RS:0;2907d75fbb3e:36655 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
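The ZKProcedureMemberRpcs entries above show each region server checking '/hbase/flush-table-proc/abort' for aborted procedures and then watching '/hbase/flush-table-proc/acquired' (and the online-snapshot equivalents) for new ones. A minimal stand-alone sketch of that watch pattern with the plain ZooKeeper client might look like the following; the quorum address is the one printed in this log, while the class name and timeout are assumptions:

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class ProcedureZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Quorum taken from the log (quorum=127.0.0.1:56064); it only exists while that test cluster runs.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56064", 30000, (WatchedEvent e) -> {
          // Fired on NodeChildrenChanged, like the ZKWatcher entries in this log.
          System.out.println("event: " + e.getType() + " on " + e.getPath());
        });

        // The member side lists the 'acquired' znode and leaves a watch so new procedures wake it up.
        List<String> pending = zk.getChildren("/hbase/flush-table-proc/acquired", true);
        System.out.println("procedures currently acquired: " + pending);

        zk.close();
      }
    }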
2024-12-09T23:46:17,743 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2907d75fbb3e,46803,1733787976254' 2024-12-09T23:46:17,743 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T23:46:17,745 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T23:46:17,746 DEBUG [RS:1;2907d75fbb3e:46803 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T23:46:17,746 INFO [RS:1;2907d75fbb3e:46803 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T23:46:17,746 INFO [RS:1;2907d75fbb3e:46803 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T23:46:17,847 INFO [RS:1;2907d75fbb3e:46803 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T23:46:17,847 INFO [RS:0;2907d75fbb3e:36655 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T23:46:17,847 INFO [RS:2;2907d75fbb3e:39769 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T23:46:17,851 INFO [RS:2;2907d75fbb3e:39769 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2907d75fbb3e%2C39769%2C1733787976325, suffix=, logDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,39769,1733787976325, archiveDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/oldWALs, maxLogs=32 2024-12-09T23:46:17,851 INFO [RS:0;2907d75fbb3e:36655 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2907d75fbb3e%2C36655%2C1733787976159, suffix=, logDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,36655,1733787976159, archiveDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/oldWALs, maxLogs=32 2024-12-09T23:46:17,851 INFO [RS:1;2907d75fbb3e:46803 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2907d75fbb3e%2C46803%2C1733787976254, suffix=, logDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,46803,1733787976254, archiveDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/oldWALs, maxLogs=32 2024-12-09T23:46:17,869 DEBUG [RS:2;2907d75fbb3e:39769 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,39769,1733787976325/2907d75fbb3e%2C39769%2C1733787976325.1733787977857, exclude list is [], retry=0 2024-12-09T23:46:17,869 DEBUG [RS:1;2907d75fbb3e:46803 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,46803,1733787976254/2907d75fbb3e%2C46803%2C1733787976254.1733787977857, exclude list is [], retry=0 2024-12-09T23:46:17,870 DEBUG [RS:0;2907d75fbb3e:36655 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,36655,1733787976159/2907d75fbb3e%2C36655%2C1733787976159.1733787977857, exclude list is 
[], retry=0 2024-12-09T23:46:17,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35729,DS-6c5e70a9-295f-4455-be70-31e472f21fe4,DISK] 2024-12-09T23:46:17,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39539,DS-e8617d22-a05c-419e-a033-3e19bdedd3bb,DISK] 2024-12-09T23:46:17,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35729,DS-6c5e70a9-295f-4455-be70-31e472f21fe4,DISK] 2024-12-09T23:46:17,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35729,DS-6c5e70a9-295f-4455-be70-31e472f21fe4,DISK] 2024-12-09T23:46:17,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41073,DS-d8c9e1ca-b4a4-4c8d-853d-dc76b8edfa36,DISK] 2024-12-09T23:46:17,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39539,DS-e8617d22-a05c-419e-a033-3e19bdedd3bb,DISK] 2024-12-09T23:46:17,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41073,DS-d8c9e1ca-b4a4-4c8d-853d-dc76b8edfa36,DISK] 2024-12-09T23:46:17,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39539,DS-e8617d22-a05c-419e-a033-3e19bdedd3bb,DISK] 2024-12-09T23:46:17,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41073,DS-d8c9e1ca-b4a4-4c8d-853d-dc76b8edfa36,DISK] 2024-12-09T23:46:17,890 INFO [RS:2;2907d75fbb3e:39769 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,39769,1733787976325/2907d75fbb3e%2C39769%2C1733787976325.1733787977857 2024-12-09T23:46:17,892 INFO [RS:0;2907d75fbb3e:36655 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,36655,1733787976159/2907d75fbb3e%2C36655%2C1733787976159.1733787977857 2024-12-09T23:46:17,892 DEBUG [RS:2;2907d75fbb3e:39769 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:36527:36527),(127.0.0.1/127.0.0.1:44409:44409),(127.0.0.1/127.0.0.1:45055:45055)] 2024-12-09T23:46:17,892 INFO [RS:1;2907d75fbb3e:46803 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,46803,1733787976254/2907d75fbb3e%2C46803%2C1733787976254.1733787977857 2024-12-09T23:46:17,892 DEBUG [RS:0;2907d75fbb3e:36655 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45055:45055),(127.0.0.1/127.0.0.1:36527:36527),(127.0.0.1/127.0.0.1:44409:44409)] 2024-12-09T23:46:17,892 DEBUG [RS:1;2907d75fbb3e:46803 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36527:36527),(127.0.0.1/127.0.0.1:44409:44409),(127.0.0.1/127.0.0.1:45055:45055)] 2024-12-09T23:46:17,991 DEBUG [2907d75fbb3e:46061 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T23:46:18,001 DEBUG [2907d75fbb3e:46061 {}] balancer.BalancerClusterState(204): Hosts are {2907d75fbb3e=0} racks are {/default-rack=0} 2024-12-09T23:46:18,008 DEBUG [2907d75fbb3e:46061 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T23:46:18,008 DEBUG [2907d75fbb3e:46061 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T23:46:18,008 DEBUG [2907d75fbb3e:46061 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T23:46:18,008 DEBUG [2907d75fbb3e:46061 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T23:46:18,008 DEBUG [2907d75fbb3e:46061 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T23:46:18,008 DEBUG [2907d75fbb3e:46061 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T23:46:18,008 INFO [2907d75fbb3e:46061 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T23:46:18,008 INFO [2907d75fbb3e:46061 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T23:46:18,008 INFO [2907d75fbb3e:46061 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T23:46:18,008 DEBUG [2907d75fbb3e:46061 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T23:46:18,015 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2907d75fbb3e,46803,1733787976254 2024-12-09T23:46:18,020 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2907d75fbb3e,46803,1733787976254, state=OPENING 2024-12-09T23:46:18,041 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T23:46:18,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:18,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:18,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:18,049 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:18,050 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:18,050 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:18,051 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:18,051 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:18,053 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T23:46:18,056 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2907d75fbb3e,46803,1733787976254}] 2024-12-09T23:46:18,235 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T23:46:18,237 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37187, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T23:46:18,248 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T23:46:18,249 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T23:46:18,249 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T23:46:18,252 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2907d75fbb3e%2C46803%2C1733787976254.meta, suffix=.meta, logDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,46803,1733787976254, archiveDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/oldWALs, maxLogs=32 2024-12-09T23:46:18,267 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,46803,1733787976254/2907d75fbb3e%2C46803%2C1733787976254.meta.1733787978254.meta, exclude list is [], retry=0 2024-12-09T23:46:18,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39539,DS-e8617d22-a05c-419e-a033-3e19bdedd3bb,DISK] 
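The WALFactory entry above shows the AsyncFSWALProvider being instantiated for the meta WAL, and the earlier AbstractFSWAL lines report the WAL configuration in effect (blocksize=256 MB, rollsize=128 MB, maxLogs=32). A hedged sketch of selecting that provider and the log cap programmatically, assuming the standard configuration keys (verify the key names against your HBase version):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Pick the async fan-out WAL implementation that the log shows being instantiated.
        conf.set("hbase.wal.provider", "asyncfs");
        // Cap the number of WAL files per server, matching maxLogs=32 above.
        conf.setInt("hbase.regionserver.maxlogs", 32);
        System.out.println(conf.get("hbase.wal.provider"));
      }
    }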
2024-12-09T23:46:18,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35729,DS-6c5e70a9-295f-4455-be70-31e472f21fe4,DISK] 2024-12-09T23:46:18,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41073,DS-d8c9e1ca-b4a4-4c8d-853d-dc76b8edfa36,DISK] 2024-12-09T23:46:18,274 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/WALs/2907d75fbb3e,46803,1733787976254/2907d75fbb3e%2C46803%2C1733787976254.meta.1733787978254.meta 2024-12-09T23:46:18,274 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45055:45055),(127.0.0.1/127.0.0.1:36527:36527),(127.0.0.1/127.0.0.1:44409:44409)] 2024-12-09T23:46:18,274 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T23:46:18,276 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T23:46:18,278 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T23:46:18,282 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
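The CoprocessorHost entries above show MultiRowMutationEndpoint being loaded from the hbase:meta table descriptor (HTD). A user table declares a coprocessor on its descriptor in a similar way; the sketch below is illustrative only, and the table name "demo" and family "cf" are made up:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorDescriptorSketch {
      public static void main(String[] args) throws Exception {
        // Declare the same endpoint that hbase:meta carries, but on a hypothetical user table.
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(td);
      }
    }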
2024-12-09T23:46:18,286 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T23:46:18,286 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T23:46:18,287 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T23:46:18,287 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T23:46:18,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T23:46:18,291 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T23:46:18,291 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:18,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:18,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T23:46:18,294 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T23:46:18,294 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:18,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:18,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T23:46:18,296 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T23:46:18,296 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:18,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:18,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T23:46:18,299 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T23:46:18,299 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:18,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
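The CompactionConfiguration lines above print the effective compaction settings for each column family of hbase:meta (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, major period 604800000 ms, major jitter 0.5). A small sketch of the configuration keys that correspond to those values, as documented in the HBase reference guide; treat the exact key names as something to verify for your version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);               // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);              // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);        // ratio 1.200000
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);   // major period (7 days in ms)
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f); // major jitter 0.500000
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }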
2024-12-09T23:46:18,300 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T23:46:18,301 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740 2024-12-09T23:46:18,303 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740 2024-12-09T23:46:18,305 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T23:46:18,305 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T23:46:18,306 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T23:46:18,308 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T23:46:18,310 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62667331, jitterRate=-0.06618399918079376}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T23:46:18,310 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T23:46:18,312 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733787978287Writing region info on filesystem at 1733787978287Initializing all the Stores at 1733787978289 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787978289Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787978289Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733787978289Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787978289Cleaning up temporary data from old regions at 1733787978305 (+16 ms)Running coprocessor post-open hooks at 1733787978310 (+5 ms)Region opened successfully at 1733787978312 (+2 ms) 2024-12-09T23:46:18,319 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733787978228 2024-12-09T23:46:18,328 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T23:46:18,329 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T23:46:18,331 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2907d75fbb3e,46803,1733787976254 2024-12-09T23:46:18,333 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2907d75fbb3e,46803,1733787976254, state=OPEN 2024-12-09T23:46:18,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T23:46:18,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T23:46:18,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T23:46:18,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T23:46:18,366 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:18,366 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:18,366 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:18,366 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:18,367 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=2907d75fbb3e,46803,1733787976254 2024-12-09T23:46:18,375 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T23:46:18,375 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2907d75fbb3e,46803,1733787976254 in 312 msec 2024-12-09T23:46:18,385 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T23:46:18,385 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 802 msec 2024-12-09T23:46:18,386 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T23:46:18,386 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T23:46:18,403 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T23:46:18,404 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2907d75fbb3e,46803,1733787976254, seqNum=-1] 2024-12-09T23:46:18,421 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T23:46:18,423 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39059, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T23:46:18,463 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1290 sec 2024-12-09T23:46:18,464 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733787978464, completionTime=-1 2024-12-09T23:46:18,467 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T23:46:18,467 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-09T23:46:18,501 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T23:46:18,501 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733788038501 2024-12-09T23:46:18,501 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733788098501 2024-12-09T23:46:18,501 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 33 msec 2024-12-09T23:46:18,502 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T23:46:18,509 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,46061,1733787975505-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:18,509 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,46061,1733787975505-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:18,509 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,46061,1733787975505-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:18,510 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2907d75fbb3e:46061, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:18,511 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:18,511 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:18,517 DEBUG [master/2907d75fbb3e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T23:46:18,539 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.082sec 2024-12-09T23:46:18,541 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T23:46:18,542 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T23:46:18,543 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T23:46:18,543 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-09T23:46:18,543 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T23:46:18,544 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,46061,1733787975505-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T23:46:18,544 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,46061,1733787975505-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T23:46:18,548 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T23:46:18,549 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T23:46:18,549 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,46061,1733787975505-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:18,588 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68c8e376, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T23:46:18,592 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T23:46:18,592 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T23:46:18,595 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2907d75fbb3e,46061,-1 for getting cluster id 2024-12-09T23:46:18,597 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T23:46:18,603 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8f6e3f4a-b4b0-4707-ac61-7c5b224747e3' 2024-12-09T23:46:18,606 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T23:46:18,606 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8f6e3f4a-b4b0-4707-ac61-7c5b224747e3" 2024-12-09T23:46:18,607 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bfa94ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T23:46:18,607 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2907d75fbb3e,46061,-1] 2024-12-09T23:46:18,609 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T23:46:18,611 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:18,611 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52130, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
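The ClusterIdFetcher and ConnectionRegistry entries above, together with the create 'TestHBaseWalOnEC' request that follows, correspond to the test client opening a connection (cluster id, meta location, master stub) and asking the master to create a one-family table. The following is a sketch of the client-side calls that produce such a request, written against the public HBase client API rather than the test's actual code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        // Opening the connection triggers the connection-registry round trips recorded above.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A single-family table like the 'TestHBaseWalOnEC' request logged below.
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build());
        }
      }
    }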
2024-12-09T23:46:18,614 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b34441, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T23:46:18,615 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T23:46:18,621 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2907d75fbb3e,46803,1733787976254, seqNum=-1] 2024-12-09T23:46:18,621 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T23:46:18,624 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35406, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T23:46:18,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2907d75fbb3e,46061,1733787975505 2024-12-09T23:46:18,644 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T23:46:18,648 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 2907d75fbb3e,46061,1733787975505 2024-12-09T23:46:18,650 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4ecec3c3 2024-12-09T23:46:18,651 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T23:46:18,653 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52144, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T23:46:18,658 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T23:46:18,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-09T23:46:18,669 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T23:46:18,671 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-09T23:46:18,671 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:18,674 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T23:46:18,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T23:46:18,682 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:18,682 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:18,685 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542472797_22 at /127.0.0.1:55140 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:41073:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55140 dst: /127.0.0.1:41073 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:18,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-09T23:46:18,690 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T23:46:18,692 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8d566bf8c3a2280eaecaf75e5323a06a, NAME => 'TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00 2024-12-09T23:46:18,699 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:18,699 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:18,704 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542472797_22 at /127.0.0.1:55168 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:41073:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55168 dst: /127.0.0.1:41073 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:18,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-09T23:46:18,709 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
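The warnings above come from writing the table's files under the RS-3-2-1024k erasure coding policy, which encodes every block group as 3 data blocks plus 2 parity blocks and therefore needs at least 5 datanodes to place a full stripe. This mini cluster runs only three datanodes (127.0.0.1:41073, :39539 and :35729), so the parity blocks at index=3 and index=4 cannot be allocated, which is exactly what "Cannot allocate parity block" and "Block group <1> failed to write 2 blocks" report; the DataXceiver "Premature EOF" errors on 127.0.0.1:41073 are consistent with the striped writer abandoning streams it could not fully place. Below is a hedged sketch for inspecting the policy on the test directory; the NameNode address and path are taken from this log, and whether they still exist when you run it is an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class EcPolicyCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:37867");   // NameNode address from the log above
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          Path dir = new Path("/user/jenkins/test-data");     // directory the WAL/region files live under
          // Print which EC policy (if any) applies to that directory; null means plain replication.
          System.out.println(dfs.getErasureCodingPolicy(dir));
          // Applying RS-3-2-1024k only makes sense with at least 5 datanodes (3 data + 2 parity):
          // dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");
        }
      }
    }

The log's own suggestion, running 'hdfs ec -verifyClusterSetup', performs the same check from the command line.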
2024-12-09T23:46:18,710 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T23:46:18,710 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 8d566bf8c3a2280eaecaf75e5323a06a, disabling compactions & flushes 2024-12-09T23:46:18,710 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 2024-12-09T23:46:18,710 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 2024-12-09T23:46:18,710 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. after waiting 0 ms 2024-12-09T23:46:18,711 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 2024-12-09T23:46:18,711 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 2024-12-09T23:46:18,711 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8d566bf8c3a2280eaecaf75e5323a06a: Waiting for close lock at 1733787978710Disabling compacts and flushes for region at 1733787978710Disabling writes for close at 1733787978711 (+1 ms)Writing region close event to WAL at 1733787978711Closed at 1733787978711 2024-12-09T23:46:18,713 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T23:46:18,718 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733787978713"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733787978713"}]},"ts":"1733787978713"} 2024-12-09T23:46:18,723 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-09T23:46:18,724 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T23:46:18,727 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733787978725"}]},"ts":"1733787978725"} 2024-12-09T23:46:18,731 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-09T23:46:18,732 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2907d75fbb3e=0} racks are {/default-rack=0} 2024-12-09T23:46:18,733 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T23:46:18,733 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T23:46:18,733 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T23:46:18,733 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T23:46:18,733 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T23:46:18,733 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T23:46:18,733 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T23:46:18,733 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T23:46:18,733 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T23:46:18,733 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T23:46:18,734 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8d566bf8c3a2280eaecaf75e5323a06a, ASSIGN}] 2024-12-09T23:46:18,737 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8d566bf8c3a2280eaecaf75e5323a06a, ASSIGN 2024-12-09T23:46:18,738 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8d566bf8c3a2280eaecaf75e5323a06a, ASSIGN; state=OFFLINE, location=2907d75fbb3e,46803,1733787976254; forceNewPlan=false, retain=false 2024-12-09T23:46:18,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T23:46:18,891 INFO [2907d75fbb3e:46061 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-09T23:46:18,892 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8d566bf8c3a2280eaecaf75e5323a06a, regionState=OPENING, regionLocation=2907d75fbb3e,46803,1733787976254 2024-12-09T23:46:18,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8d566bf8c3a2280eaecaf75e5323a06a, ASSIGN because future has completed 2024-12-09T23:46:18,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8d566bf8c3a2280eaecaf75e5323a06a, server=2907d75fbb3e,46803,1733787976254}] 2024-12-09T23:46:18,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T23:46:19,063 INFO [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 2024-12-09T23:46:19,064 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8d566bf8c3a2280eaecaf75e5323a06a, NAME => 'TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a.', STARTKEY => '', ENDKEY => ''} 2024-12-09T23:46:19,065 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 8d566bf8c3a2280eaecaf75e5323a06a 2024-12-09T23:46:19,065 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T23:46:19,065 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8d566bf8c3a2280eaecaf75e5323a06a 2024-12-09T23:46:19,065 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8d566bf8c3a2280eaecaf75e5323a06a 2024-12-09T23:46:19,069 INFO [StoreOpener-8d566bf8c3a2280eaecaf75e5323a06a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8d566bf8c3a2280eaecaf75e5323a06a 2024-12-09T23:46:19,072 INFO [StoreOpener-8d566bf8c3a2280eaecaf75e5323a06a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8d566bf8c3a2280eaecaf75e5323a06a columnFamilyName cf 2024-12-09T23:46:19,073 DEBUG [StoreOpener-8d566bf8c3a2280eaecaf75e5323a06a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:19,074 INFO [StoreOpener-8d566bf8c3a2280eaecaf75e5323a06a-1 {}] regionserver.HStore(327): Store=8d566bf8c3a2280eaecaf75e5323a06a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T23:46:19,074 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8d566bf8c3a2280eaecaf75e5323a06a 2024-12-09T23:46:19,076 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/default/TestHBaseWalOnEC/8d566bf8c3a2280eaecaf75e5323a06a 2024-12-09T23:46:19,077 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/default/TestHBaseWalOnEC/8d566bf8c3a2280eaecaf75e5323a06a 2024-12-09T23:46:19,077 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8d566bf8c3a2280eaecaf75e5323a06a 2024-12-09T23:46:19,077 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8d566bf8c3a2280eaecaf75e5323a06a 2024-12-09T23:46:19,080 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8d566bf8c3a2280eaecaf75e5323a06a 2024-12-09T23:46:19,085 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/default/TestHBaseWalOnEC/8d566bf8c3a2280eaecaf75e5323a06a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T23:46:19,086 INFO [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8d566bf8c3a2280eaecaf75e5323a06a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62135056, jitterRate=-0.07411551475524902}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T23:46:19,086 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8d566bf8c3a2280eaecaf75e5323a06a 2024-12-09T23:46:19,087 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8d566bf8c3a2280eaecaf75e5323a06a: Running coprocessor pre-open hook at 1733787979066Writing region info on filesystem at 1733787979066Initializing all the Stores at 1733787979069 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733787979069Cleaning up temporary data from old regions at 1733787979077 (+8 ms)Running coprocessor post-open hooks at 1733787979086 (+9 ms)Region opened successfully at 1733787979087 (+1 ms) 2024-12-09T23:46:19,089 INFO [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a., pid=6, masterSystemTime=1733787979052 2024-12-09T23:46:19,092 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 2024-12-09T23:46:19,092 INFO [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 2024-12-09T23:46:19,093 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8d566bf8c3a2280eaecaf75e5323a06a, regionState=OPEN, openSeqNum=2, regionLocation=2907d75fbb3e,46803,1733787976254 2024-12-09T23:46:19,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8d566bf8c3a2280eaecaf75e5323a06a, server=2907d75fbb3e,46803,1733787976254 because future has completed 2024-12-09T23:46:19,103 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T23:46:19,103 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8d566bf8c3a2280eaecaf75e5323a06a, server=2907d75fbb3e,46803,1733787976254 in 202 msec 2024-12-09T23:46:19,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T23:46:19,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=8d566bf8c3a2280eaecaf75e5323a06a, ASSIGN in 369 msec 2024-12-09T23:46:19,109 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T23:46:19,109 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733787979109"}]},"ts":"1733787979109"} 2024-12-09T23:46:19,112 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-09T23:46:19,114 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T23:46:19,117 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 453 msec 
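For orientation, the CreateTableProcedure that just finished (pid=4) corresponds to a client-side createTable call for TestHBaseWalOnEC with the single column family 'cf' described at the top of this section. A minimal, hypothetical sketch of such a call using the standard Admin API; the connection and descriptor details beyond the table and family names are assumptions, not taken from the test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      // Builds the same shape of table seen in the log: one family 'cf', default attributes.
      public static void create(Admin admin) throws Exception {
        admin.createTable(TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
            .build());
      }
    }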
2024-12-09T23:46:19,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T23:46:19,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-09T23:46:19,310 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T23:46:19,311 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T23:46:19,316 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-09T23:46:19,317 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T23:46:19,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-09T23:46:19,324 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a., hostname=2907d75fbb3e,46803,1733787976254, seqNum=2] 2024-12-09T23:46:19,332 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-09T23:46:19,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-09T23:46:19,339 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-09T23:46:19,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T23:46:19,341 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T23:46:19,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T23:46:19,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T23:46:19,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46803 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T23:46:19,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 
2024-12-09T23:46:19,516 INFO [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 8d566bf8c3a2280eaecaf75e5323a06a 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-09T23:46:19,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/default/TestHBaseWalOnEC/8d566bf8c3a2280eaecaf75e5323a06a/.tmp/cf/074fb46fed304d878ea924d462343047 is 36, key is row/cf:cq/1733787979325/Put/seqid=0 2024-12-09T23:46:19,568 WARN [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:19,569 WARN [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:19,573 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1961598923_22 at /127.0.0.1:41816 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:39539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41816 dst: /127.0.0.1:39539 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T23:46:19,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-09T23:46:19,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T23:46:19,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T23:46:19,981 WARN [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T23:46:19,981 INFO [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/default/TestHBaseWalOnEC/8d566bf8c3a2280eaecaf75e5323a06a/.tmp/cf/074fb46fed304d878ea924d462343047 2024-12-09T23:46:20,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/default/TestHBaseWalOnEC/8d566bf8c3a2280eaecaf75e5323a06a/.tmp/cf/074fb46fed304d878ea924d462343047 as hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/default/TestHBaseWalOnEC/8d566bf8c3a2280eaecaf75e5323a06a/cf/074fb46fed304d878ea924d462343047 2024-12-09T23:46:20,041 INFO [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/default/TestHBaseWalOnEC/8d566bf8c3a2280eaecaf75e5323a06a/cf/074fb46fed304d878ea924d462343047, entries=1, sequenceid=5, filesize=4.7 K 2024-12-09T23:46:20,047 INFO [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 8d566bf8c3a2280eaecaf75e5323a06a in 530ms, sequenceid=5, compaction requested=false 2024-12-09T23:46:20,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-09T23:46:20,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 8d566bf8c3a2280eaecaf75e5323a06a: 2024-12-09T23:46:20,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 
2024-12-09T23:46:20,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T23:46:20,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T23:46:20,060 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T23:46:20,060 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 714 msec 2024-12-09T23:46:20,064 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 728 msec 2024-12-09T23:46:20,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-09T23:46:20,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-09T23:46:20,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-09T23:46:20,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-09T23:46:20,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-09T23:46:20,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-09T23:46:20,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T23:46:20,479 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T23:46:20,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-09T23:46:20,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-09T23:46:20,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-09T23:46:20,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-09T23:46:20,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T23:46:20,492 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
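Before the minicluster shutdown that begins above, the test's write-and-flush step has completed: FlushTableProcedure pid=7 and its FlushRegionProcedure child pid=8 flushed a single 32-byte cell whose key the HFile writer printed earlier as row/cf:cq/1733787979325/Put. A hedged sketch of the client calls that would produce that Put-then-FLUSH sequence; the connection handling and the cell value are assumptions, not read from the test source:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushSketch {
      public static void writeAndFlush(Connection conn) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
          // Matches the cell logged by HFileWriterImpl: key is row/cf:cq (value is assumed).
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Drives FlushTableProcedure (pid=7) and its FlushRegionProcedure child (pid=8).
          admin.flush(name);
        }
      }
    }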
2024-12-09T23:46:20,492 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T23:46:20,496 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
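The call stack above shows where the shutdown originates: TestHBaseWalOnEC.tearDown (line 101) invoking HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then stops the HBase cluster, ZooKeeper and the mini DFS. A hypothetical sketch of such a tearDown hook; the UTIL field name and the @After placement are inferred from the JUnit frames, not taken from the test source:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class TearDownSketch {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Mirrors the stack above: close connections, stop HBase, then the mini DFS cluster.
        UTIL.shutdownMiniCluster();
      }
    }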
2024-12-09T23:46:20,497 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:20,497 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T23:46:20,497 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T23:46:20,497 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=216153882, stopped=false 2024-12-09T23:46:20,497 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2907d75fbb3e,46061,1733787975505 2024-12-09T23:46:20,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:20,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:20,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:20,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:20,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:20,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:20,549 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T23:46:20,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:20,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:20,550 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T23:46:20,550 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T23:46:20,551 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:20,551 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:20,551 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:20,551 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:20,551 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2907d75fbb3e,36655,1733787976159' ***** 2024-12-09T23:46:20,551 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T23:46:20,551 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2907d75fbb3e,46803,1733787976254' ***** 2024-12-09T23:46:20,552 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:20,552 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T23:46:20,552 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T23:46:20,552 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T23:46:20,552 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2907d75fbb3e,39769,1733787976325' ***** 2024-12-09T23:46:20,553 INFO [RS:0;2907d75fbb3e:36655 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T23:46:20,553 INFO [RS:1;2907d75fbb3e:46803 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T23:46:20,553 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T23:46:20,553 INFO [RS:1;2907d75fbb3e:46803 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T23:46:20,553 INFO [RS:0;2907d75fbb3e:36655 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T23:46:20,553 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T23:46:20,553 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.HRegionServer(959): stopping server 2907d75fbb3e,36655,1733787976159 2024-12-09T23:46:20,553 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(3091): Received CLOSE for 8d566bf8c3a2280eaecaf75e5323a06a 2024-12-09T23:46:20,553 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T23:46:20,553 INFO [RS:0;2907d75fbb3e:36655 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2907d75fbb3e:36655. 
2024-12-09T23:46:20,554 DEBUG [RS:0;2907d75fbb3e:36655 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T23:46:20,554 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T23:46:20,554 DEBUG [RS:0;2907d75fbb3e:36655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:20,554 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(959): stopping server 2907d75fbb3e,46803,1733787976254 2024-12-09T23:46:20,554 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T23:46:20,554 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T23:46:20,554 INFO [RS:1;2907d75fbb3e:46803 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2907d75fbb3e:46803. 2024-12-09T23:46:20,554 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.HRegionServer(976): stopping server 2907d75fbb3e,36655,1733787976159; all regions closed. 2024-12-09T23:46:20,554 INFO [RS:2;2907d75fbb3e:39769 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-09T23:46:20,554 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T23:46:20,554 DEBUG [RS:1;2907d75fbb3e:46803 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T23:46:20,554 DEBUG [RS:1;2907d75fbb3e:46803 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:20,554 INFO [RS:2;2907d75fbb3e:39769 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T23:46:20,555 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.HRegionServer(959): stopping server 2907d75fbb3e,39769,1733787976325 2024-12-09T23:46:20,555 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T23:46:20,555 DEBUG [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8d566bf8c3a2280eaecaf75e5323a06a, disabling compactions & flushes 2024-12-09T23:46:20,555 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T23:46:20,555 INFO [RS:2;2907d75fbb3e:39769 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;2907d75fbb3e:39769. 2024-12-09T23:46:20,555 INFO [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 2024-12-09T23:46:20,555 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-09T23:46:20,555 DEBUG [RS:2;2907d75fbb3e:39769 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T23:46:20,555 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T23:46:20,555 DEBUG [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 2024-12-09T23:46:20,555 DEBUG [RS:2;2907d75fbb3e:39769 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:20,555 DEBUG [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. after waiting 0 ms 2024-12-09T23:46:20,555 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T23:46:20,555 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.HRegionServer(976): stopping server 2907d75fbb3e,39769,1733787976325; all regions closed. 2024-12-09T23:46:20,555 DEBUG [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 
2024-12-09T23:46:20,556 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T23:46:20,556 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T23:46:20,556 DEBUG [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 8d566bf8c3a2280eaecaf75e5323a06a=TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a.} 2024-12-09T23:46:20,556 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T23:46:20,556 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T23:46:20,556 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T23:46:20,556 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T23:46:20,557 DEBUG [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8d566bf8c3a2280eaecaf75e5323a06a 2024-12-09T23:46:20,557 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-09T23:46:20,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_1073741827_1017 (size=93) 2024-12-09T23:46:20,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_1073741827_1017 (size=93) 2024-12-09T23:46:20,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_1073741828_1018 (size=93) 2024-12-09T23:46:20,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741827_1017 (size=93) 2024-12-09T23:46:20,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_1073741828_1018 (size=93) 2024-12-09T23:46:20,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741828_1018 (size=93) 2024-12-09T23:46:20,571 DEBUG [RS:2;2907d75fbb3e:39769 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/oldWALs 2024-12-09T23:46:20,571 DEBUG [RS:0;2907d75fbb3e:36655 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/oldWALs 2024-12-09T23:46:20,571 INFO [RS:0;2907d75fbb3e:36655 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2907d75fbb3e%2C36655%2C1733787976159:(num 1733787977857) 2024-12-09T23:46:20,571 INFO [RS:2;2907d75fbb3e:39769 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2907d75fbb3e%2C39769%2C1733787976325:(num 1733787977857) 2024-12-09T23:46:20,571 DEBUG [RS:0;2907d75fbb3e:36655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:20,571 DEBUG [RS:2;2907d75fbb3e:39769 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:20,571 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T23:46:20,571 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T23:46:20,571 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T23:46:20,571 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T23:46:20,572 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.ChoreService(370): Chore service for: regionserver/2907d75fbb3e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T23:46:20,572 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.ChoreService(370): Chore service for: regionserver/2907d75fbb3e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T23:46:20,572 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T23:46:20,572 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T23:46:20,572 INFO [regionserver/2907d75fbb3e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T23:46:20,572 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T23:46:20,572 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T23:46:20,572 INFO [regionserver/2907d75fbb3e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T23:46:20,572 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T23:46:20,572 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T23:46:20,572 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T23:46:20,572 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T23:46:20,572 INFO [RS:0;2907d75fbb3e:36655 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36655 2024-12-09T23:46:20,572 INFO [RS:2;2907d75fbb3e:39769 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39769 2024-12-09T23:46:20,580 DEBUG [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/default/TestHBaseWalOnEC/8d566bf8c3a2280eaecaf75e5323a06a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T23:46:20,581 INFO [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 
2024-12-09T23:46:20,582 DEBUG [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8d566bf8c3a2280eaecaf75e5323a06a: Waiting for close lock at 1733787980554Running coprocessor pre-close hooks at 1733787980555 (+1 ms)Disabling compacts and flushes for region at 1733787980555Disabling writes for close at 1733787980555Writing region close event to WAL at 1733787980557 (+2 ms)Running coprocessor post-close hooks at 1733787980581 (+24 ms)Closed at 1733787980581 2024-12-09T23:46:20,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T23:46:20,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2907d75fbb3e,36655,1733787976159 2024-12-09T23:46:20,582 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T23:46:20,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2907d75fbb3e,39769,1733787976325 2024-12-09T23:46:20,582 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T23:46:20,582 DEBUG [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a. 2024-12-09T23:46:20,583 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2907d75fbb3e,36655,1733787976159] 2024-12-09T23:46:20,590 INFO [regionserver/2907d75fbb3e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T23:46:20,590 INFO [regionserver/2907d75fbb3e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T23:46:20,595 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/.tmp/info/9c71f6d5d7ee4fe790f941b74827365e is 153, key is TestHBaseWalOnEC,,1733787978654.8d566bf8c3a2280eaecaf75e5323a06a./info:regioninfo/1733787979093/Put/seqid=0 2024-12-09T23:46:20,597 INFO [regionserver/2907d75fbb3e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T23:46:20,598 WARN [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:20,598 WARN [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T23:46:20,598 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2907d75fbb3e,36655,1733787976159 already deleted, retry=false 2024-12-09T23:46:20,598 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2907d75fbb3e,36655,1733787976159 expired; onlineServers=2 2024-12-09T23:46:20,599 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2907d75fbb3e,39769,1733787976325] 2024-12-09T23:46:20,602 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1961598923_22 at /127.0.0.1:41890 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:39539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41890 dst: /127.0.0.1:39539 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:20,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-09T23:46:20,606 WARN [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
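Note on the recurring warnings above: the write path is using the RS-3-2-1024k erasure coding policy, which places 3 data blocks plus 2 parity blocks per block group and therefore needs at least five datanodes, while this minicluster runs only three (the StartMiniClusterOption line later in the log shows numDataNodes=3). The log's own suggestion is to run 'hdfs ec -verifyClusterSetup'. As a purely illustrative sketch, not part of the test, the same capacity check can be done programmatically; the directory path and the configuration source here are assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcCapacityCheck {
    public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS points at the cluster under test, e.g. hdfs://localhost:37867.
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

        Path dir = new Path("/user/jenkins/test-data");  // hypothetical directory
        ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
        if (policy == null) {
            System.out.println(dir + " uses plain replication, no EC policy to verify");
            return;
        }
        // RS-3-2-1024k => 3 data units + 2 parity units => 5 datanodes needed.
        int required = policy.getNumDataUnits() + policy.getNumParityUnits();
        int live = dfs.getDataNodeStats().length;
        System.out.printf("policy=%s needs %d datanodes, %d are live%n",
            policy.getName(), required, live);
        if (live < required) {
            System.out.println("Parity blocks cannot all be placed; expect the"
                + " 'Cannot allocate parity block' warnings shown in this log.");
        }
    }
}

With fewer live datanodes than data-plus-parity units, the striped writer falls back to writing an incomplete block group, which is exactly why the 'Block group <1> failed to write 2 blocks' warning follows each flush here.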
2024-12-09T23:46:20,606 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/.tmp/info/9c71f6d5d7ee4fe790f941b74827365e 2024-12-09T23:46:20,606 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2907d75fbb3e,39769,1733787976325 already deleted, retry=false 2024-12-09T23:46:20,607 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2907d75fbb3e,39769,1733787976325 expired; onlineServers=1 2024-12-09T23:46:20,630 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/.tmp/ns/59adb192d0454792a1649867884feffa is 43, key is default/ns:d/1733787978426/Put/seqid=0 2024-12-09T23:46:20,633 WARN [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:20,633 WARN [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:20,636 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1961598923_22 at /127.0.0.1:55236 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:41073:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55236 dst: /127.0.0.1:41073 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T23:46:20,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-09T23:46:20,640 WARN [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T23:46:20,641 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/.tmp/ns/59adb192d0454792a1649867884feffa 2024-12-09T23:46:20,664 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/.tmp/table/d9c97c39c3324399bec13819440cd6a9 is 52, key is TestHBaseWalOnEC/table:state/1733787979109/Put/seqid=0 2024-12-09T23:46:20,666 WARN [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:20,666 WARN [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:20,669 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1961598923_22 at /127.0.0.1:41902 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:39539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41902 dst: /127.0.0.1:39539 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:20,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-09T23:46:20,674 WARN [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T23:46:20,675 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/.tmp/table/d9c97c39c3324399bec13819440cd6a9 2024-12-09T23:46:20,684 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/.tmp/info/9c71f6d5d7ee4fe790f941b74827365e as hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/info/9c71f6d5d7ee4fe790f941b74827365e 2024-12-09T23:46:20,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:20,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:20,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39769-0x1000d06bb0c0003, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:20,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36655-0x1000d06bb0c0001, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:20,691 INFO [RS:0;2907d75fbb3e:36655 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T23:46:20,691 INFO [RS:2;2907d75fbb3e:39769 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T23:46:20,691 INFO [RS:0;2907d75fbb3e:36655 {}] regionserver.HRegionServer(1031): Exiting; stopping=2907d75fbb3e,36655,1733787976159; zookeeper connection closed. 2024-12-09T23:46:20,691 INFO [RS:2;2907d75fbb3e:39769 {}] regionserver.HRegionServer(1031): Exiting; stopping=2907d75fbb3e,39769,1733787976325; zookeeper connection closed. 
2024-12-09T23:46:20,692 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7cb4c403 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7cb4c403 2024-12-09T23:46:20,692 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6f1047cf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6f1047cf 2024-12-09T23:46:20,692 INFO [regionserver/2907d75fbb3e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T23:46:20,692 INFO [regionserver/2907d75fbb3e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T23:46:20,694 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/info/9c71f6d5d7ee4fe790f941b74827365e, entries=10, sequenceid=11, filesize=6.5 K 2024-12-09T23:46:20,696 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/.tmp/ns/59adb192d0454792a1649867884feffa as hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/ns/59adb192d0454792a1649867884feffa 2024-12-09T23:46:20,705 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/ns/59adb192d0454792a1649867884feffa, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T23:46:20,706 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/.tmp/table/d9c97c39c3324399bec13819440cd6a9 as hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/table/d9c97c39c3324399bec13819440cd6a9 2024-12-09T23:46:20,715 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/table/d9c97c39c3324399bec13819440cd6a9, entries=2, sequenceid=11, filesize=5.1 K 2024-12-09T23:46:20,717 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 161ms, sequenceid=11, compaction requested=false 2024-12-09T23:46:20,717 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T23:46:20,726 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T23:46:20,727 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 
{event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T23:46:20,727 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T23:46:20,727 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733787980556Running coprocessor pre-close hooks at 1733787980556Disabling compacts and flushes for region at 1733787980556Disabling writes for close at 1733787980556Obtaining lock to block concurrent updates at 1733787980557 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733787980557Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733787980557Flushing stores of hbase:meta,,1.1588230740 at 1733787980559 (+2 ms)Flushing 1588230740/info: creating writer at 1733787980559Flushing 1588230740/info: appending metadata at 1733787980592 (+33 ms)Flushing 1588230740/info: closing flushed file at 1733787980593 (+1 ms)Flushing 1588230740/ns: creating writer at 1733787980615 (+22 ms)Flushing 1588230740/ns: appending metadata at 1733787980629 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733787980629Flushing 1588230740/table: creating writer at 1733787980650 (+21 ms)Flushing 1588230740/table: appending metadata at 1733787980664 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733787980664Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78bebb0d: reopening flushed file at 1733787980683 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21f94b44: reopening flushed file at 1733787980694 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@153ea8eb: reopening flushed file at 1733787980705 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 161ms, sequenceid=11, compaction requested=false at 1733787980717 (+12 ms)Writing region close event to WAL at 1733787980719 (+2 ms)Running coprocessor post-close hooks at 1733787980727 (+8 ms)Closed at 1733787980727 2024-12-09T23:46:20,727 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T23:46:20,757 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(976): stopping server 2907d75fbb3e,46803,1733787976254; all regions closed. 
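The 'Region close journal' entries above pack every step of the close and flush sequence into a single line, where '(+N ms)' is the time elapsed since the previous step. A small, purely hypothetical helper, not part of HBase, can split such a journal into per-step timings; the hard-coded string below is a shortened excerpt of the journal above:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalTimings {
    // Each step is "<description> at <13-digit epoch millis>", optionally followed by " (+N ms)".
    private static final Pattern STEP =
        Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+\\d+ ms\\))?");

    public static void main(String[] args) {
        String journal = "Waiting for close lock at 1733787980556"
            + "Running coprocessor pre-close hooks at 1733787980556"
            + "Flushing stores of hbase:meta,,1.1588230740 at 1733787980559"
            + "Writing region close event to WAL at 1733787980719"
            + "Closed at 1733787980727";
        Matcher m = STEP.matcher(journal);
        long prev = -1L;
        while (m.find()) {
            long ts = Long.parseLong(m.group(2));
            // Delta from the previous parsed step; the journal itself prints this as "+N ms".
            long delta = (prev < 0) ? 0 : ts - prev;
            System.out.printf("%-50s %d  (+%d ms)%n", m.group(1).trim(), ts, delta);
            prev = ts;
        }
    }
}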
2024-12-09T23:46:20,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_1073741829_1019 (size=2751) 2024-12-09T23:46:20,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741829_1019 (size=2751) 2024-12-09T23:46:20,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_1073741829_1019 (size=2751) 2024-12-09T23:46:20,766 DEBUG [RS:1;2907d75fbb3e:46803 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/oldWALs 2024-12-09T23:46:20,766 INFO [RS:1;2907d75fbb3e:46803 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2907d75fbb3e%2C46803%2C1733787976254.meta:.meta(num 1733787978254) 2024-12-09T23:46:20,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_1073741826_1016 (size=1298) 2024-12-09T23:46:20,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_1073741826_1016 (size=1298) 2024-12-09T23:46:20,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741826_1016 (size=1298) 2024-12-09T23:46:20,772 DEBUG [RS:1;2907d75fbb3e:46803 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/oldWALs 2024-12-09T23:46:20,772 INFO [RS:1;2907d75fbb3e:46803 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2907d75fbb3e%2C46803%2C1733787976254:(num 1733787977857) 2024-12-09T23:46:20,773 DEBUG [RS:1;2907d75fbb3e:46803 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:20,773 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T23:46:20,773 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T23:46:20,773 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.ChoreService(370): Chore service for: regionserver/2907d75fbb3e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T23:46:20,773 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T23:46:20,773 INFO [regionserver/2907d75fbb3e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T23:46:20,773 INFO [RS:1;2907d75fbb3e:46803 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46803 2024-12-09T23:46:20,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T23:46:20,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2907d75fbb3e,46803,1733787976254 2024-12-09T23:46:20,790 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T23:46:20,790 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$373/0x00007fb4608f6e60@174469c8 rejected from java.util.concurrent.ThreadPoolExecutor@5246f544[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-09T23:46:20,798 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2907d75fbb3e,46803,1733787976254] 2024-12-09T23:46:20,807 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2907d75fbb3e,46803,1733787976254 already deleted, retry=false 2024-12-09T23:46:20,807 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2907d75fbb3e,46803,1733787976254 expired; onlineServers=0 2024-12-09T23:46:20,807 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2907d75fbb3e,46061,1733787975505' ***** 2024-12-09T23:46:20,807 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T23:46:20,807 INFO [M:0;2907d75fbb3e:46061 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T23:46:20,807 INFO [M:0;2907d75fbb3e:46061 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T23:46:20,808 DEBUG [M:0;2907d75fbb3e:46061 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T23:46:20,808 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T23:46:20,808 DEBUG [M:0;2907d75fbb3e:46061 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T23:46:20,808 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster-HFileCleaner.large.0-1733787977440 {}] cleaner.HFileCleaner(306): Exit Thread[master/2907d75fbb3e:0:becomeActiveMaster-HFileCleaner.large.0-1733787977440,5,FailOnTimeoutGroup] 2024-12-09T23:46:20,808 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster-HFileCleaner.small.0-1733787977442 {}] cleaner.HFileCleaner(306): Exit Thread[master/2907d75fbb3e:0:becomeActiveMaster-HFileCleaner.small.0-1733787977442,5,FailOnTimeoutGroup] 2024-12-09T23:46:20,808 INFO [M:0;2907d75fbb3e:46061 {}] hbase.ChoreService(370): Chore service for: master/2907d75fbb3e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T23:46:20,809 INFO [M:0;2907d75fbb3e:46061 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T23:46:20,809 DEBUG [M:0;2907d75fbb3e:46061 {}] master.HMaster(1795): Stopping service threads 2024-12-09T23:46:20,809 INFO [M:0;2907d75fbb3e:46061 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T23:46:20,809 INFO [M:0;2907d75fbb3e:46061 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T23:46:20,810 INFO [M:0;2907d75fbb3e:46061 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T23:46:20,810 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T23:46:20,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T23:46:20,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:20,815 DEBUG [M:0;2907d75fbb3e:46061 {}] zookeeper.ZKUtil(347): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T23:46:20,816 WARN [M:0;2907d75fbb3e:46061 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T23:46:20,817 INFO [M:0;2907d75fbb3e:46061 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/.lastflushedseqids 2024-12-09T23:46:20,829 WARN [M:0;2907d75fbb3e:46061 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:20,829 WARN [M:0;2907d75fbb3e:46061 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T23:46:20,831 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542472797_22 at /127.0.0.1:55258 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:41073:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55258 dst: /127.0.0.1:41073 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:20,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-09T23:46:20,836 WARN [M:0;2907d75fbb3e:46061 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T23:46:20,836 INFO [M:0;2907d75fbb3e:46061 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T23:46:20,836 INFO [M:0;2907d75fbb3e:46061 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T23:46:20,836 DEBUG [M:0;2907d75fbb3e:46061 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T23:46:20,836 INFO [M:0;2907d75fbb3e:46061 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T23:46:20,837 DEBUG [M:0;2907d75fbb3e:46061 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T23:46:20,837 DEBUG [M:0;2907d75fbb3e:46061 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T23:46:20,837 DEBUG [M:0;2907d75fbb3e:46061 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T23:46:20,837 INFO [M:0;2907d75fbb3e:46061 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-09T23:46:20,855 DEBUG [M:0;2907d75fbb3e:46061 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0bd950d52cbf4568a4af11d197bb55e4 is 82, key is hbase:meta,,1/info:regioninfo/1733787978330/Put/seqid=0 2024-12-09T23:46:20,857 WARN [M:0;2907d75fbb3e:46061 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:20,857 WARN [M:0;2907d75fbb3e:46061 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:20,859 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542472797_22 at /127.0.0.1:55280 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:41073:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55280 dst: /127.0.0.1:41073 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:20,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-09T23:46:20,864 WARN [M:0;2907d75fbb3e:46061 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T23:46:20,864 INFO [M:0;2907d75fbb3e:46061 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0bd950d52cbf4568a4af11d197bb55e4 2024-12-09T23:46:20,887 DEBUG [M:0;2907d75fbb3e:46061 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1a0bfa4a61b44aefb492e5c199f61877 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733787979116/Put/seqid=0 2024-12-09T23:46:20,889 WARN [M:0;2907d75fbb3e:46061 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:20,889 WARN [M:0;2907d75fbb3e:46061 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:20,892 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542472797_22 at /127.0.0.1:55292 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:39539:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55292 dst: /127.0.0.1:39539 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:20,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_-9223372036854775552_1037 (size=6439) 2024-12-09T23:46:20,896 WARN [M:0;2907d75fbb3e:46061 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T23:46:20,896 INFO [M:0;2907d75fbb3e:46061 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1a0bfa4a61b44aefb492e5c199f61877 2024-12-09T23:46:20,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:20,898 INFO [RS:1;2907d75fbb3e:46803 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T23:46:20,899 INFO [RS:1;2907d75fbb3e:46803 {}] regionserver.HRegionServer(1031): Exiting; stopping=2907d75fbb3e,46803,1733787976254; zookeeper connection closed. 2024-12-09T23:46:20,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46803-0x1000d06bb0c0002, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:20,899 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1e3d3832 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1e3d3832 2024-12-09T23:46:20,899 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T23:46:20,919 DEBUG [M:0;2907d75fbb3e:46061 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/99ce5fed89ba42e4801502cb04c791e8 is 69, key is 2907d75fbb3e,36655,1733787976159/rs:state/1733787977526/Put/seqid=0 2024-12-09T23:46:20,921 WARN [M:0;2907d75fbb3e:46061 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:20,921 WARN [M:0;2907d75fbb3e:46061 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T23:46:20,924 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1542472797_22 at /127.0.0.1:38694 [Receiving block BP-1524071419-172.17.0.2-1733787971502:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:41073:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38694 dst: /127.0.0.1:41073 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T23:46:20,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-09T23:46:20,928 WARN [M:0;2907d75fbb3e:46061 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T23:46:20,928 INFO [M:0;2907d75fbb3e:46061 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/99ce5fed89ba42e4801502cb04c791e8 2024-12-09T23:46:20,937 DEBUG [M:0;2907d75fbb3e:46061 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0bd950d52cbf4568a4af11d197bb55e4 as hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0bd950d52cbf4568a4af11d197bb55e4 2024-12-09T23:46:20,944 INFO [M:0;2907d75fbb3e:46061 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0bd950d52cbf4568a4af11d197bb55e4, entries=8, sequenceid=72, filesize=5.5 K 2024-12-09T23:46:20,946 DEBUG [M:0;2907d75fbb3e:46061 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1a0bfa4a61b44aefb492e5c199f61877 as hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1a0bfa4a61b44aefb492e5c199f61877 2024-12-09T23:46:20,955 INFO [M:0;2907d75fbb3e:46061 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1a0bfa4a61b44aefb492e5c199f61877, entries=8, sequenceid=72, filesize=6.3 K 2024-12-09T23:46:20,957 DEBUG [M:0;2907d75fbb3e:46061 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/99ce5fed89ba42e4801502cb04c791e8 as hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/99ce5fed89ba42e4801502cb04c791e8 2024-12-09T23:46:20,965 INFO [M:0;2907d75fbb3e:46061 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/99ce5fed89ba42e4801502cb04c791e8, entries=3, sequenceid=72, filesize=5.2 K 2024-12-09T23:46:20,967 INFO [M:0;2907d75fbb3e:46061 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=72, compaction requested=false 2024-12-09T23:46:20,968 INFO [M:0;2907d75fbb3e:46061 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T23:46:20,968 DEBUG [M:0;2907d75fbb3e:46061 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733787980836Disabling compacts and flushes for region at 1733787980836Disabling writes for close at 1733787980837 (+1 ms)Obtaining lock to block concurrent updates at 1733787980837Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733787980837Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733787980837Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733787980838 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733787980838Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733787980854 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733787980854Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733787980872 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733787980887 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733787980887Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733787980904 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733787980919 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733787980919Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d30a913: reopening flushed file at 1733787980935 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@560dfc1b: reopening flushed file at 1733787980944 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bb33b9e: reopening flushed file at 1733787980955 (+11 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=72, compaction requested=false at 1733787980967 (+12 ms)Writing region close event to WAL at 1733787980968 (+1 ms)Closed at 1733787980968 2024-12-09T23:46:20,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741825_1011 (size=32674) 2024-12-09T23:46:20,972 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/MasterData/WALs/2907d75fbb3e,46061,1733787975505/2907d75fbb3e%2C46061%2C1733787975505.1733787976778 not finished, retry = 0 2024-12-09T23:46:20,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39539 is added to blk_1073741825_1011 (size=32674) 2024-12-09T23:46:20,972 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41073 is added to blk_1073741825_1011 (size=32674) 2024-12-09T23:46:21,074 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T23:46:21,074 INFO [M:0;2907d75fbb3e:46061 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T23:46:21,074 INFO [M:0;2907d75fbb3e:46061 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46061 2024-12-09T23:46:21,075 INFO [M:0;2907d75fbb3e:46061 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T23:46:21,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:21,191 INFO [M:0;2907d75fbb3e:46061 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T23:46:21,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46061-0x1000d06bb0c0000, quorum=127.0.0.1:56064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:21,199 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3114ae69{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T23:46:21,203 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T23:46:21,203 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T23:46:21,203 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T23:46:21,203 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/hadoop.log.dir/,STOPPED} 2024-12-09T23:46:21,207 WARN [BP-1524071419-172.17.0.2-1733787971502 heartbeating to localhost/127.0.0.1:37867 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T23:46:21,207 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T23:46:21,207 WARN [BP-1524071419-172.17.0.2-1733787971502 heartbeating to localhost/127.0.0.1:37867 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1524071419-172.17.0.2-1733787971502 (Datanode Uuid 02d055fa-762c-4bf9-b089-11f3c212d05c) service to localhost/127.0.0.1:37867 2024-12-09T23:46:21,207 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T23:46:21,208 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/data/data5/current/BP-1524071419-172.17.0.2-1733787971502 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T23:46:21,208 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/data/data6/current/BP-1524071419-172.17.0.2-1733787971502 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T23:46:21,209 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T23:46:21,210 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@353955e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T23:46:21,211 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T23:46:21,211 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T23:46:21,211 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T23:46:21,211 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/hadoop.log.dir/,STOPPED} 2024-12-09T23:46:21,212 WARN [BP-1524071419-172.17.0.2-1733787971502 heartbeating to localhost/127.0.0.1:37867 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T23:46:21,212 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T23:46:21,212 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T23:46:21,212 WARN [BP-1524071419-172.17.0.2-1733787971502 heartbeating to localhost/127.0.0.1:37867 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1524071419-172.17.0.2-1733787971502 (Datanode Uuid 99a14f3e-adcd-4b13-930e-3e9e5189a466) service to localhost/127.0.0.1:37867 2024-12-09T23:46:21,213 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/data/data3/current/BP-1524071419-172.17.0.2-1733787971502 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T23:46:21,213 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/data/data4/current/BP-1524071419-172.17.0.2-1733787971502 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T23:46:21,213 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T23:46:21,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b97a472{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T23:46:21,218 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T23:46:21,218 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T23:46:21,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T23:46:21,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/hadoop.log.dir/,STOPPED} 2024-12-09T23:46:21,219 WARN [BP-1524071419-172.17.0.2-1733787971502 heartbeating to localhost/127.0.0.1:37867 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T23:46:21,219 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T23:46:21,219 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T23:46:21,219 WARN [BP-1524071419-172.17.0.2-1733787971502 heartbeating to localhost/127.0.0.1:37867 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1524071419-172.17.0.2-1733787971502 (Datanode Uuid 6a308b02-07e4-4d41-8410-7ec60f96a6d2) service to localhost/127.0.0.1:37867 2024-12-09T23:46:21,220 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/data/data1/current/BP-1524071419-172.17.0.2-1733787971502 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T23:46:21,220 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/cluster_84061776-8be8-4651-1817-94431618e00f/data/data2/current/BP-1524071419-172.17.0.2-1733787971502 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T23:46:21,220 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T23:46:21,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62d6efd9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T23:46:21,228 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T23:46:21,228 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T23:46:21,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T23:46:21,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/hadoop.log.dir/,STOPPED} 2024-12-09T23:46:21,235 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T23:46:21,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T23:46:21,270 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=86 (was 156), OpenFileDescriptor=453 (was 393) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=220 (was 181) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=5127 (was 5436) 2024-12-09T23:46:21,275 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=86, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=220, ProcessCount=11, AvailableMemoryMB=5127 2024-12-09T23:46:21,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T23:46:21,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/hadoop.log.dir so I do NOT create it in target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3 2024-12-09T23:46:21,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ba2f68b-8f6d-c4b0-2008-7c1ba6915ea6/hadoop.tmp.dir so I do NOT create it in target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3 2024-12-09T23:46:21,276 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0, deleteOnExit=true 2024-12-09T23:46:21,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T23:46:21,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/test.cache.data in system properties and HBase conf 2024-12-09T23:46:21,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T23:46:21,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/hadoop.log.dir in system properties and HBase conf 2024-12-09T23:46:21,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T23:46:21,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T23:46:21,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T23:46:21,277 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T23:46:21,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T23:46:21,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T23:46:21,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T23:46:21,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T23:46:21,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T23:46:21,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T23:46:21,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T23:46:21,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T23:46:21,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T23:46:21,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/nfs.dump.dir in system properties and HBase conf 2024-12-09T23:46:21,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/java.io.tmpdir in system properties and HBase conf 2024-12-09T23:46:21,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T23:46:21,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T23:46:21,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T23:46:21,545 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T23:46:21,549 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T23:46:21,551 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T23:46:21,551 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T23:46:21,551 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T23:46:21,552 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T23:46:21,552 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17794d45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/hadoop.log.dir/,AVAILABLE} 2024-12-09T23:46:21,553 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4072566{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T23:46:21,645 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10db3f46{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/java.io.tmpdir/jetty-localhost-36383-hadoop-hdfs-3_4_1-tests_jar-_-any-78334327237249108/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T23:46:21,645 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43a7f4cb{HTTP/1.1, (http/1.1)}{localhost:36383} 2024-12-09T23:46:21,646 INFO [Time-limited test {}] server.Server(415): Started @11795ms 2024-12-09T23:46:21,842 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T23:46:21,846 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T23:46:21,847 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T23:46:21,847 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T23:46:21,847 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T23:46:21,848 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e23bf16{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/hadoop.log.dir/,AVAILABLE} 2024-12-09T23:46:21,848 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68e19264{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T23:46:21,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@11cbed31{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/java.io.tmpdir/jetty-localhost-36017-hadoop-hdfs-3_4_1-tests_jar-_-any-3101745391433244967/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T23:46:21,942 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@11812ea4{HTTP/1.1, (http/1.1)}{localhost:36017} 2024-12-09T23:46:21,942 INFO [Time-limited test {}] server.Server(415): Started @12091ms 2024-12-09T23:46:21,943 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T23:46:21,976 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T23:46:21,980 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T23:46:21,981 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T23:46:21,981 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T23:46:21,981 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T23:46:21,981 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c773fd1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/hadoop.log.dir/,AVAILABLE} 2024-12-09T23:46:21,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72785dee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T23:46:22,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25cc5f4d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/java.io.tmpdir/jetty-localhost-41579-hadoop-hdfs-3_4_1-tests_jar-_-any-7220588002471628793/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T23:46:22,075 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6686fe53{HTTP/1.1, (http/1.1)}{localhost:41579} 2024-12-09T23:46:22,075 INFO [Time-limited test {}] server.Server(415): Started @12225ms 2024-12-09T23:46:22,077 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T23:46:22,111 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T23:46:22,114 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T23:46:22,115 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T23:46:22,115 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T23:46:22,115 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T23:46:22,115 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e74bc8b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/hadoop.log.dir/,AVAILABLE} 2024-12-09T23:46:22,116 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c97cc8b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T23:46:22,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c3d82b3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/java.io.tmpdir/jetty-localhost-32807-hadoop-hdfs-3_4_1-tests_jar-_-any-7072103597523914168/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T23:46:22,205 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@190023f0{HTTP/1.1, (http/1.1)}{localhost:32807} 2024-12-09T23:46:22,205 INFO [Time-limited test {}] server.Server(415): Started @12355ms 2024-12-09T23:46:22,207 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T23:46:22,783 WARN [Thread-565 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/data/data2/current/BP-534286281-172.17.0.2-1733787981300/current, will proceed with Du for space computation calculation, 2024-12-09T23:46:22,783 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/data/data1/current/BP-534286281-172.17.0.2-1733787981300/current, will proceed with Du for space computation calculation, 2024-12-09T23:46:22,799 WARN [Thread-505 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T23:46:22,802 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc82cffaf25b53d2c with lease ID 0x43f3ed3d7a760d5c: Processing first storage report for DS-4785abfb-dda5-4c9e-b032-56c44a2bc296 from datanode DatanodeRegistration(127.0.0.1:39731, datanodeUuid=9d69277e-4fd5-4466-a524-6a703d451e5e, infoPort=46227, infoSecurePort=0, ipcPort=45205, storageInfo=lv=-57;cid=testClusterID;nsid=116512586;c=1733787981300) 2024-12-09T23:46:22,802 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc82cffaf25b53d2c with lease ID 0x43f3ed3d7a760d5c: from storage DS-4785abfb-dda5-4c9e-b032-56c44a2bc296 node DatanodeRegistration(127.0.0.1:39731, datanodeUuid=9d69277e-4fd5-4466-a524-6a703d451e5e, infoPort=46227, infoSecurePort=0, ipcPort=45205, storageInfo=lv=-57;cid=testClusterID;nsid=116512586;c=1733787981300), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T23:46:22,802 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc82cffaf25b53d2c with lease ID 0x43f3ed3d7a760d5c: Processing first storage report for DS-d4887e0d-ba79-4d8b-ab95-555bc08deeb6 from datanode DatanodeRegistration(127.0.0.1:39731, datanodeUuid=9d69277e-4fd5-4466-a524-6a703d451e5e, infoPort=46227, infoSecurePort=0, ipcPort=45205, storageInfo=lv=-57;cid=testClusterID;nsid=116512586;c=1733787981300) 2024-12-09T23:46:22,802 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc82cffaf25b53d2c with lease ID 0x43f3ed3d7a760d5c: from storage DS-d4887e0d-ba79-4d8b-ab95-555bc08deeb6 node DatanodeRegistration(127.0.0.1:39731, datanodeUuid=9d69277e-4fd5-4466-a524-6a703d451e5e, infoPort=46227, infoSecurePort=0, ipcPort=45205, storageInfo=lv=-57;cid=testClusterID;nsid=116512586;c=1733787981300), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T23:46:23,044 WARN [Thread-576 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/data/data3/current/BP-534286281-172.17.0.2-1733787981300/current, will proceed with Du for space computation calculation, 2024-12-09T23:46:23,045 WARN [Thread-577 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/data/data4/current/BP-534286281-172.17.0.2-1733787981300/current, will proceed with Du for space computation calculation, 2024-12-09T23:46:23,065 WARN [Thread-528 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T23:46:23,069 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc18bb08df145083b with lease ID 0x43f3ed3d7a760d5d: Processing first storage report for DS-f8341713-10ad-47c8-91c6-32c1d63ec050 from datanode DatanodeRegistration(127.0.0.1:36613, datanodeUuid=96b9268c-1c16-46f2-8668-71fb6ba87d29, infoPort=46815, infoSecurePort=0, ipcPort=33293, storageInfo=lv=-57;cid=testClusterID;nsid=116512586;c=1733787981300) 2024-12-09T23:46:23,069 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc18bb08df145083b with lease ID 0x43f3ed3d7a760d5d: from storage DS-f8341713-10ad-47c8-91c6-32c1d63ec050 node DatanodeRegistration(127.0.0.1:36613, datanodeUuid=96b9268c-1c16-46f2-8668-71fb6ba87d29, infoPort=46815, infoSecurePort=0, ipcPort=33293, storageInfo=lv=-57;cid=testClusterID;nsid=116512586;c=1733787981300), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T23:46:23,069 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc18bb08df145083b with lease ID 0x43f3ed3d7a760d5d: Processing first storage report for DS-aafd3b23-807b-44c6-b805-f169c4273609 from datanode DatanodeRegistration(127.0.0.1:36613, datanodeUuid=96b9268c-1c16-46f2-8668-71fb6ba87d29, infoPort=46815, infoSecurePort=0, ipcPort=33293, storageInfo=lv=-57;cid=testClusterID;nsid=116512586;c=1733787981300) 2024-12-09T23:46:23,069 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc18bb08df145083b with lease ID 0x43f3ed3d7a760d5d: from storage DS-aafd3b23-807b-44c6-b805-f169c4273609 node DatanodeRegistration(127.0.0.1:36613, datanodeUuid=96b9268c-1c16-46f2-8668-71fb6ba87d29, infoPort=46815, infoSecurePort=0, ipcPort=33293, storageInfo=lv=-57;cid=testClusterID;nsid=116512586;c=1733787981300), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T23:46:23,146 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/data/data6/current/BP-534286281-172.17.0.2-1733787981300/current, will proceed with Du for space computation calculation, 2024-12-09T23:46:23,147 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/data/data5/current/BP-534286281-172.17.0.2-1733787981300/current, will proceed with Du for space computation calculation, 2024-12-09T23:46:23,163 WARN [Thread-550 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T23:46:23,165 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x60418a782e7492fe with lease ID 0x43f3ed3d7a760d5e: Processing first storage report for DS-dd044bde-ec54-4336-989e-d39642f673ab from datanode DatanodeRegistration(127.0.0.1:40249, datanodeUuid=5b161dbd-cecb-417a-81f5-16f8578d89b8, infoPort=37341, infoSecurePort=0, ipcPort=45939, storageInfo=lv=-57;cid=testClusterID;nsid=116512586;c=1733787981300) 2024-12-09T23:46:23,166 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x60418a782e7492fe with lease ID 0x43f3ed3d7a760d5e: from storage DS-dd044bde-ec54-4336-989e-d39642f673ab node DatanodeRegistration(127.0.0.1:40249, datanodeUuid=5b161dbd-cecb-417a-81f5-16f8578d89b8, infoPort=37341, infoSecurePort=0, ipcPort=45939, storageInfo=lv=-57;cid=testClusterID;nsid=116512586;c=1733787981300), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T23:46:23,166 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x60418a782e7492fe with lease ID 0x43f3ed3d7a760d5e: Processing first storage report for DS-658773d5-d1ef-4a83-8f22-1082f66d82a4 from datanode DatanodeRegistration(127.0.0.1:40249, datanodeUuid=5b161dbd-cecb-417a-81f5-16f8578d89b8, infoPort=37341, infoSecurePort=0, ipcPort=45939, storageInfo=lv=-57;cid=testClusterID;nsid=116512586;c=1733787981300) 2024-12-09T23:46:23,166 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x60418a782e7492fe with lease ID 0x43f3ed3d7a760d5e: from storage DS-658773d5-d1ef-4a83-8f22-1082f66d82a4 node DatanodeRegistration(127.0.0.1:40249, datanodeUuid=5b161dbd-cecb-417a-81f5-16f8578d89b8, infoPort=37341, infoSecurePort=0, ipcPort=45939, storageInfo=lv=-57;cid=testClusterID;nsid=116512586;c=1733787981300), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T23:46:23,247 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3 2024-12-09T23:46:23,250 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/zookeeper_0, clientPort=51082, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T23:46:23,251 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51082 2024-12-09T23:46:23,252 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:23,254 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:23,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741825_1001 (size=7) 2024-12-09T23:46:23,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741825_1001 (size=7) 2024-12-09T23:46:23,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741825_1001 (size=7) 2024-12-09T23:46:23,272 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a with version=8 2024-12-09T23:46:23,273 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37867/user/jenkins/test-data/1c338360-f03d-9fa5-8641-bcfa93b60f00/hbase-staging 2024-12-09T23:46:23,275 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2907d75fbb3e:0 server-side Connection retries=45 2024-12-09T23:46:23,275 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:23,275 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:23,275 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T23:46:23,275 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:23,275 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T23:46:23,275 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T23:46:23,275 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T23:46:23,276 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34049 2024-12-09T23:46:23,277 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34049 connecting to ZooKeeper ensemble=127.0.0.1:51082 2024-12-09T23:46:23,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:340490x0, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T23:46:23,323 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34049-0x1000d06dc2e0000 connected 2024-12-09T23:46:23,391 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:23,395 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:23,398 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:23,399 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a, hbase.cluster.distributed=false 2024-12-09T23:46:23,402 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T23:46:23,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34049 2024-12-09T23:46:23,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34049 2024-12-09T23:46:23,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34049 2024-12-09T23:46:23,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34049 2024-12-09T23:46:23,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34049 2024-12-09T23:46:23,420 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2907d75fbb3e:0 server-side Connection retries=45 2024-12-09T23:46:23,420 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:23,420 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:23,420 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T23:46:23,420 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:23,420 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T23:46:23,420 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T23:46:23,420 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T23:46:23,421 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42829 2024-12-09T23:46:23,422 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42829 connecting to ZooKeeper ensemble=127.0.0.1:51082 2024-12-09T23:46:23,423 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:23,424 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:23,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428290x0, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T23:46:23,432 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42829-0x1000d06dc2e0001 connected 2024-12-09T23:46:23,432 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:23,433 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T23:46:23,433 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T23:46:23,434 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T23:46:23,435 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T23:46:23,437 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42829 2024-12-09T23:46:23,437 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42829 2024-12-09T23:46:23,437 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42829 2024-12-09T23:46:23,438 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42829 2024-12-09T23:46:23,438 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42829 2024-12-09T23:46:23,451 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2907d75fbb3e:0 server-side Connection retries=45 2024-12-09T23:46:23,452 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:23,452 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:23,452 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T23:46:23,452 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:23,452 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T23:46:23,452 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T23:46:23,452 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T23:46:23,453 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44271 2024-12-09T23:46:23,454 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44271 connecting to ZooKeeper ensemble=127.0.0.1:51082 2024-12-09T23:46:23,454 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:23,455 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:23,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:442710x0, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T23:46:23,465 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44271-0x1000d06dc2e0002 connected 2024-12-09T23:46:23,466 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:23,466 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T23:46:23,466 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T23:46:23,467 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T23:46:23,469 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T23:46:23,469 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44271 2024-12-09T23:46:23,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44271 2024-12-09T23:46:23,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44271 2024-12-09T23:46:23,471 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44271 2024-12-09T23:46:23,471 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44271 2024-12-09T23:46:23,487 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2907d75fbb3e:0 server-side Connection retries=45 2024-12-09T23:46:23,488 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:23,488 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:23,488 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T23:46:23,488 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T23:46:23,488 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T23:46:23,488 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T23:46:23,488 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T23:46:23,489 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41843 2024-12-09T23:46:23,490 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41843 connecting to ZooKeeper ensemble=127.0.0.1:51082 2024-12-09T23:46:23,491 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:23,492 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:23,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:418430x0, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T23:46:23,506 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41843-0x1000d06dc2e0003 connected 2024-12-09T23:46:23,506 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:23,506 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T23:46:23,507 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T23:46:23,508 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T23:46:23,510 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T23:46:23,510 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41843 2024-12-09T23:46:23,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41843 2024-12-09T23:46:23,515 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41843 2024-12-09T23:46:23,515 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41843 2024-12-09T23:46:23,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41843 2024-12-09T23:46:23,530 DEBUG [M:0;2907d75fbb3e:34049 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2907d75fbb3e:34049 2024-12-09T23:46:23,531 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2907d75fbb3e,34049,1733787983274 2024-12-09T23:46:23,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:23,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:23,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:23,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:23,540 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2907d75fbb3e,34049,1733787983274 2024-12-09T23:46:23,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T23:46:23,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T23:46:23,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T23:46:23,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,549 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T23:46:23,550 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2907d75fbb3e,34049,1733787983274 from backup master directory 2024-12-09T23:46:23,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:23,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2907d75fbb3e,34049,1733787983274 2024-12-09T23:46:23,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:23,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:23,557 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T23:46:23,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T23:46:23,557 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2907d75fbb3e,34049,1733787983274 2024-12-09T23:46:23,564 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/hbase.id] with ID: dc0f44c0-a9f7-450f-9452-98a8e23a9892 2024-12-09T23:46:23,564 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/.tmp/hbase.id 2024-12-09T23:46:23,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741826_1002 (size=42) 2024-12-09T23:46:23,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741826_1002 (size=42) 2024-12-09T23:46:23,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741826_1002 (size=42) 2024-12-09T23:46:23,573 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/.tmp/hbase.id]:[hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/hbase.id] 2024-12-09T23:46:23,589 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T23:46:23,590 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T23:46:23,591 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-09T23:46:23,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741827_1003 (size=196) 2024-12-09T23:46:23,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741827_1003 (size=196) 2024-12-09T23:46:23,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741827_1003 (size=196) 2024-12-09T23:46:23,609 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T23:46:23,609 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T23:46:23,610 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T23:46:23,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is 
added to blk_1073741828_1004 (size=1189) 2024-12-09T23:46:23,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741828_1004 (size=1189) 2024-12-09T23:46:23,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741828_1004 (size=1189) 2024-12-09T23:46:23,621 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store 2024-12-09T23:46:23,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741829_1005 (size=34) 2024-12-09T23:46:23,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741829_1005 (size=34) 2024-12-09T23:46:23,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741829_1005 (size=34) 2024-12-09T23:46:23,631 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T23:46:23,631 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T23:46:23,631 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T23:46:23,631 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
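The master:store descriptor logged above (families info, proc, rs and state, each with its own VERSIONS, IN_MEMORY, DATA_BLOCK_ENCODING, BLOOMFILTER and BLOCKSIZE) maps directly onto the public TableDescriptorBuilder API. A sketch covering the first two families only; master:store is an internal table, so this illustrates how the attribute list translates into builder calls rather than something a client would create:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Partial reconstruction of the 'master:store' descriptor from the log; the 'rs'
// and 'state' families would follow the same shape as 'proc'.
public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                  // VERSIONS => '3'
            .setInMemory(true)                                  // IN_MEMORY => 'true'
            .setBlocksize(8192)                                 // BLOCKSIZE => 8KB
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                  // VERSIONS => '1'
            .setBlocksize(64 * 1024)                            // BLOCKSIZE => 64KB
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
  }
}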
2024-12-09T23:46:23,631 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T23:46:23,631 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T23:46:23,631 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T23:46:23,631 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733787983631Disabling compacts and flushes for region at 1733787983631Disabling writes for close at 1733787983631Writing region close event to WAL at 1733787983631Closed at 1733787983631 2024-12-09T23:46:23,632 WARN [master/2907d75fbb3e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/.initializing 2024-12-09T23:46:23,632 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/WALs/2907d75fbb3e,34049,1733787983274 2024-12-09T23:46:23,636 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2907d75fbb3e%2C34049%2C1733787983274, suffix=, logDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/WALs/2907d75fbb3e,34049,1733787983274, archiveDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/oldWALs, maxLogs=10 2024-12-09T23:46:23,636 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2907d75fbb3e%2C34049%2C1733787983274.1733787983636 2024-12-09T23:46:23,645 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/WALs/2907d75fbb3e,34049,1733787983274/2907d75fbb3e%2C34049%2C1733787983274.1733787983636 2024-12-09T23:46:23,647 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46227:46227),(127.0.0.1/127.0.0.1:46815:46815),(127.0.0.1/127.0.0.1:37341:37341)] 2024-12-09T23:46:23,647 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T23:46:23,648 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T23:46:23,648 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:23,648 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:23,649 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:23,651 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T23:46:23,651 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:23,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:23,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:23,654 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T23:46:23,654 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:23,654 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T23:46:23,655 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:23,657 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T23:46:23,657 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:23,658 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T23:46:23,658 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:23,660 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T23:46:23,660 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:23,661 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T23:46:23,661 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:23,662 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:23,663 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:23,665 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:23,666 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:23,666 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T23:46:23,668 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T23:46:23,671 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T23:46:23,672 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71440077, jitterRate=0.06454010307788849}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T23:46:23,672 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733787983648Initializing all the Stores at 1733787983649 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787983649Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733787983649Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733787983649Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733787983649Cleaning up temporary data from old regions at 1733787983666 (+17 ms)Region opened successfully at 1733787983672 (+6 ms) 2024-12-09T23:46:23,673 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T23:46:23,677 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6afb6b17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2907d75fbb3e/172.17.0.2:0 2024-12-09T23:46:23,678 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T23:46:23,678 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T23:46:23,679 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T23:46:23,679 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T23:46:23,679 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T23:46:23,680 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T23:46:23,680 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T23:46:23,682 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
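The "Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50" line suggests a configured core count overriding a computed default, with a burst cap derived from the core count. A hypothetical reading in code; the fallback formula and the 10x burst factor are inferred from this single log line, not taken from the HBase sources:

// Illustrative sizing logic only; constants are guesses for the sketch.
public class WorkerSizingSketch {
  static int coreWorkers(int configured) {
    if (configured > 0) {
      return configured;                       // an explicit (test) configuration wins
    }
    int cpus = Runtime.getRuntime().availableProcessors();
    return Math.max(cpus / 4, 16);             // "bigger of cpus/4 or 16"
  }

  static int burstWorkers(int coreWorkers) {
    return coreWorkers * 10;                   // 5 core workers -> burst cap 50 in the log
  }

  public static void main(String[] args) {
    int core = coreWorkers(5);
    System.out.println(core + " core workers, burst cap " + burstWorkers(core));
  }
}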
2024-12-09T23:46:23,683 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T23:46:23,705 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T23:46:23,706 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T23:46:23,707 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T23:46:23,715 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T23:46:23,715 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T23:46:23,717 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T23:46:23,723 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T23:46:23,724 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T23:46:23,732 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T23:46:23,735 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T23:46:23,740 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T23:46:23,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:23,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:23,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:23,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-09T23:46:23,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,750 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2907d75fbb3e,34049,1733787983274, sessionid=0x1000d06dc2e0000, setting cluster-up flag (Was=false) 2024-12-09T23:46:23,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,790 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T23:46:23,791 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T23:46:23,792 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2907d75fbb3e,34049,1733787983274 2024-12-09T23:46:23,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T23:46:23,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:23,832 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T23:46:23,833 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2907d75fbb3e,34049,1733787983274 2024-12-09T23:46:23,835 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T23:46:23,837 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T23:46:23,838 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T23:46:23,838 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
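The StochasticLoadBalancer config above lists its cost functions together with their multipliers. A generic sketch of how per-function multipliers weight individual cost scores into a single objective that a balancer can minimize; the types here are invented for illustration and are not the balancer's own classes:

import java.util.List;
import java.util.function.ToDoubleFunction;

// Weighted-sum cost evaluation over a list of cost functions.
public class WeightedCostSketch {
  static final class WeightedCost<S> {
    final double multiplier;
    final ToDoubleFunction<S> cost;
    WeightedCost(double multiplier, ToDoubleFunction<S> cost) {
      this.multiplier = multiplier;
      this.cost = cost;
    }
  }

  static <S> double totalCost(List<WeightedCost<S>> functions, S state) {
    double sum = 0.0;
    for (WeightedCost<S> f : functions) {
      if (f.multiplier != 0.0) {               // zero-weight functions contribute nothing
        sum += f.multiplier * f.cost.applyAsDouble(state);
      }
    }
    return sum;
  }
}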
2024-12-09T23:46:23,838 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2907d75fbb3e,34049,1733787983274 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T23:46:23,840 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2907d75fbb3e:0, corePoolSize=5, maxPoolSize=5 2024-12-09T23:46:23,840 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2907d75fbb3e:0, corePoolSize=5, maxPoolSize=5 2024-12-09T23:46:23,840 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2907d75fbb3e:0, corePoolSize=5, maxPoolSize=5 2024-12-09T23:46:23,840 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2907d75fbb3e:0, corePoolSize=5, maxPoolSize=5 2024-12-09T23:46:23,840 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2907d75fbb3e:0, corePoolSize=10, maxPoolSize=10 2024-12-09T23:46:23,840 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:23,840 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2907d75fbb3e:0, corePoolSize=2, maxPoolSize=2 2024-12-09T23:46:23,840 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:23,843 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T23:46:23,843 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T23:46:23,843 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733788013843 2024-12-09T23:46:23,843 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T23:46:23,843 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T23:46:23,844 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T23:46:23,844 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T23:46:23,844 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T23:46:23,844 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T23:46:23,844 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:23,844 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T23:46:23,844 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T23:46:23,844 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:23,845 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T23:46:23,845 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T23:46:23,845 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T23:46:23,845 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T23:46:23,845 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2907d75fbb3e:0:becomeActiveMaster-HFileCleaner.large.0-1733787983845,5,FailOnTimeoutGroup] 2024-12-09T23:46:23,847 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/2907d75fbb3e:0:becomeActiveMaster-HFileCleaner.small.0-1733787983845,5,FailOnTimeoutGroup] 2024-12-09T23:46:23,847 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:23,847 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T23:46:23,847 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:23,847 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:23,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T23:46:23,854 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T23:46:23,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741831_1007 (size=1321) 2024-12-09T23:46:23,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741831_1007 (size=1321) 2024-12-09T23:46:23,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741831_1007 (size=1321) 2024-12-09T23:46:23,860 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T23:46:23,860 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a 2024-12-09T23:46:23,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741832_1008 (size=32) 2024-12-09T23:46:23,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741832_1008 (size=32) 2024-12-09T23:46:23,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741832_1008 (size=32) 2024-12-09T23:46:23,870 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T23:46:23,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T23:46:23,873 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T23:46:23,873 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:23,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:23,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T23:46:23,875 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, 
single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T23:46:23,875 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:23,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:23,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T23:46:23,877 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T23:46:23,877 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:23,877 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:23,877 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T23:46:23,878 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T23:46:23,878 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:23,879 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:23,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T23:46:23,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740 2024-12-09T23:46:23,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740 2024-12-09T23:46:23,882 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T23:46:23,882 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T23:46:23,883 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T23:46:23,884 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T23:46:23,887 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T23:46:23,887 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64928933, jitterRate=-0.032483503222465515}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T23:46:23,888 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733787983870Initializing all the Stores at 1733787983871 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787983871Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787983871Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733787983871Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787983871Cleaning up temporary data from old regions at 1733787983882 (+11 ms)Region opened successfully at 1733787983888 (+6 ms) 2024-12-09T23:46:23,888 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T23:46:23,888 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T23:46:23,888 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T23:46:23,888 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T23:46:23,888 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T23:46:23,889 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T23:46:23,889 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733787983888Disabling compacts and flushes for region at 1733787983888Disabling writes for close at 1733787983888Writing region close event to WAL at 1733787983889 (+1 ms)Closed at 1733787983889 2024-12-09T23:46:23,890 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T23:46:23,890 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T23:46:23,890 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T23:46:23,892 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T23:46:23,893 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T23:46:23,918 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.HRegionServer(746): ClusterId : dc0f44c0-a9f7-450f-9452-98a8e23a9892 2024-12-09T23:46:23,918 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(746): ClusterId : dc0f44c0-a9f7-450f-9452-98a8e23a9892 2024-12-09T23:46:23,918 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(746): ClusterId : dc0f44c0-a9f7-450f-9452-98a8e23a9892 2024-12-09T23:46:23,918 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T23:46:23,918 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T23:46:23,918 DEBUG [RS:0;2907d75fbb3e:42829 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T23:46:23,948 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T23:46:23,948 DEBUG [RS:0;2907d75fbb3e:42829 {}] 
procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T23:46:23,948 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T23:46:23,948 DEBUG [RS:0;2907d75fbb3e:42829 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T23:46:23,948 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T23:46:23,948 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T23:46:23,957 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T23:46:23,957 DEBUG [RS:0;2907d75fbb3e:42829 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T23:46:23,957 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T23:46:23,958 DEBUG [RS:0;2907d75fbb3e:42829 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b5d40e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2907d75fbb3e/172.17.0.2:0 2024-12-09T23:46:23,958 DEBUG [RS:2;2907d75fbb3e:41843 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59237c84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2907d75fbb3e/172.17.0.2:0 2024-12-09T23:46:23,958 DEBUG [RS:1;2907d75fbb3e:44271 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@392d45bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2907d75fbb3e/172.17.0.2:0 2024-12-09T23:46:23,969 DEBUG [RS:2;2907d75fbb3e:41843 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;2907d75fbb3e:41843 2024-12-09T23:46:23,969 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T23:46:23,969 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T23:46:23,969 DEBUG [RS:2;2907d75fbb3e:41843 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T23:46:23,971 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.HRegionServer(2659): reportForDuty to master=2907d75fbb3e,34049,1733787983274 with port=41843, startcode=1733787983487 2024-12-09T23:46:23,971 DEBUG [RS:2;2907d75fbb3e:41843 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T23:46:23,973 DEBUG [RS:1;2907d75fbb3e:44271 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2907d75fbb3e:44271 2024-12-09T23:46:23,973 DEBUG [RS:0;2907d75fbb3e:42829 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2907d75fbb3e:42829 2024-12-09T23:46:23,974 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T23:46:23,974 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T23:46:23,974 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T23:46:23,974 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T23:46:23,974 DEBUG [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T23:46:23,974 DEBUG [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T23:46:23,974 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35827, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T23:46:23,974 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34049 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2907d75fbb3e,41843,1733787983487 2024-12-09T23:46:23,975 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(2659): reportForDuty to master=2907d75fbb3e,34049,1733787983274 with port=42829, startcode=1733787983420 2024-12-09T23:46:23,975 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34049 {}] master.ServerManager(517): Registering regionserver=2907d75fbb3e,41843,1733787983487 2024-12-09T23:46:23,975 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(2659): reportForDuty to master=2907d75fbb3e,34049,1733787983274 with port=44271, startcode=1733787983451 2024-12-09T23:46:23,975 DEBUG [RS:0;2907d75fbb3e:42829 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T23:46:23,975 DEBUG [RS:1;2907d75fbb3e:44271 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T23:46:23,977 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41387, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T23:46:23,977 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42919, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T23:46:23,977 DEBUG [RS:2;2907d75fbb3e:41843 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a 2024-12-09T23:46:23,977 DEBUG [RS:2;2907d75fbb3e:41843 {}] 
regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41235 2024-12-09T23:46:23,977 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34049 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2907d75fbb3e,44271,1733787983451 2024-12-09T23:46:23,977 DEBUG [RS:2;2907d75fbb3e:41843 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T23:46:23,977 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34049 {}] master.ServerManager(517): Registering regionserver=2907d75fbb3e,44271,1733787983451 2024-12-09T23:46:23,979 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34049 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2907d75fbb3e,42829,1733787983420 2024-12-09T23:46:23,979 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34049 {}] master.ServerManager(517): Registering regionserver=2907d75fbb3e,42829,1733787983420 2024-12-09T23:46:23,979 DEBUG [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a 2024-12-09T23:46:23,979 DEBUG [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41235 2024-12-09T23:46:23,979 DEBUG [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T23:46:23,981 DEBUG [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a 2024-12-09T23:46:23,981 DEBUG [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41235 2024-12-09T23:46:23,981 DEBUG [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T23:46:23,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T23:46:24,018 DEBUG [RS:2;2907d75fbb3e:41843 {}] zookeeper.ZKUtil(111): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2907d75fbb3e,41843,1733787983487 2024-12-09T23:46:24,018 WARN [RS:2;2907d75fbb3e:41843 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T23:46:24,018 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2907d75fbb3e,41843,1733787983487] 2024-12-09T23:46:24,018 INFO [RS:2;2907d75fbb3e:41843 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T23:46:24,018 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2907d75fbb3e,42829,1733787983420] 2024-12-09T23:46:24,018 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2907d75fbb3e,44271,1733787983451] 2024-12-09T23:46:24,018 DEBUG [RS:0;2907d75fbb3e:42829 {}] zookeeper.ZKUtil(111): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2907d75fbb3e,42829,1733787983420 2024-12-09T23:46:24,018 DEBUG [RS:1;2907d75fbb3e:44271 {}] zookeeper.ZKUtil(111): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2907d75fbb3e,44271,1733787983451 2024-12-09T23:46:24,018 WARN [RS:1;2907d75fbb3e:44271 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T23:46:24,018 WARN [RS:0;2907d75fbb3e:42829 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T23:46:24,018 DEBUG [RS:2;2907d75fbb3e:41843 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/WALs/2907d75fbb3e,41843,1733787983487 2024-12-09T23:46:24,018 INFO [RS:1;2907d75fbb3e:44271 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T23:46:24,018 INFO [RS:0;2907d75fbb3e:42829 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T23:46:24,018 DEBUG [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/WALs/2907d75fbb3e,42829,1733787983420 2024-12-09T23:46:24,018 DEBUG [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/WALs/2907d75fbb3e,44271,1733787983451 2024-12-09T23:46:24,022 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T23:46:24,022 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T23:46:24,025 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T23:46:24,025 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T23:46:24,025 INFO [RS:1;2907d75fbb3e:44271 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T23:46:24,025 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.ChoreService(168): Chore ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,027 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T23:46:24,028 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T23:46:24,028 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,028 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,028 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,028 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,029 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,029 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,029 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2907d75fbb3e:0, corePoolSize=2, maxPoolSize=2 2024-12-09T23:46:24,029 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,029 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,029 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,029 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,029 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,029 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,029 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2907d75fbb3e:0, corePoolSize=3, maxPoolSize=3 2024-12-09T23:46:24,029 DEBUG [RS:1;2907d75fbb3e:44271 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0, corePoolSize=3, maxPoolSize=3 2024-12-09T23:46:24,031 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T23:46:24,033 INFO 
[RS:2;2907d75fbb3e:41843 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T23:46:24,033 INFO [RS:0;2907d75fbb3e:42829 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T23:46:24,033 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,033 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,033 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,033 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,034 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,034 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,034 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T23:46:24,034 INFO [RS:2;2907d75fbb3e:41843 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T23:46:24,034 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,44271,1733787983451-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T23:46:24,034 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,035 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T23:46:24,035 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T23:46:24,035 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T23:46:24,035 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,035 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,035 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,035 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,036 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,036 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2907d75fbb3e:0, corePoolSize=2, maxPoolSize=2 2024-12-09T23:46:24,036 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T23:46:24,036 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,036 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,036 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T23:46:24,036 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,036 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,036 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,036 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,036 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,036 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,036 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,036 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2907d75fbb3e:0, corePoolSize=3, maxPoolSize=3 2024-12-09T23:46:24,036 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,036 DEBUG [RS:0;2907d75fbb3e:42829 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0, corePoolSize=3, maxPoolSize=3 2024-12-09T23:46:24,036 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,036 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2907d75fbb3e:0, corePoolSize=2, maxPoolSize=2 2024-12-09T23:46:24,037 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,037 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,037 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,037 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,037 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,037 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service 
name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2907d75fbb3e:0, corePoolSize=1, maxPoolSize=1 2024-12-09T23:46:24,037 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2907d75fbb3e:0, corePoolSize=3, maxPoolSize=3 2024-12-09T23:46:24,037 DEBUG [RS:2;2907d75fbb3e:41843 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0, corePoolSize=3, maxPoolSize=3 2024-12-09T23:46:24,039 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,039 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,039 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,039 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,039 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,039 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,42829,1733787983420-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T23:46:24,043 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,043 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,043 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,043 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,043 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,044 WARN [2907d75fbb3e:34049 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T23:46:24,044 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,41843,1733787983487-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T23:46:24,050 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T23:46:24,051 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,44271,1733787983451-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,051 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T23:46:24,051 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.Replication(171): 2907d75fbb3e,44271,1733787983451 started 2024-12-09T23:46:24,053 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T23:46:24,053 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,42829,1733787983420-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,053 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,054 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.Replication(171): 2907d75fbb3e,42829,1733787983420 started 2024-12-09T23:46:24,065 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,066 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(1482): Serving as 2907d75fbb3e,42829,1733787983420, RpcServer on 2907d75fbb3e/172.17.0.2:42829, sessionid=0x1000d06dc2e0001 2024-12-09T23:46:24,066 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T23:46:24,066 DEBUG [RS:0;2907d75fbb3e:42829 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T23:46:24,066 DEBUG [RS:0;2907d75fbb3e:42829 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2907d75fbb3e,42829,1733787983420 2024-12-09T23:46:24,066 DEBUG [RS:0;2907d75fbb3e:42829 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2907d75fbb3e,42829,1733787983420' 2024-12-09T23:46:24,066 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,41843,1733787983487-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,066 DEBUG [RS:0;2907d75fbb3e:42829 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T23:46:24,066 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T23:46:24,066 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.Replication(171): 2907d75fbb3e,41843,1733787983487 started 2024-12-09T23:46:24,067 DEBUG [RS:0;2907d75fbb3e:42829 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T23:46:24,067 DEBUG [RS:0;2907d75fbb3e:42829 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T23:46:24,067 DEBUG [RS:0;2907d75fbb3e:42829 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T23:46:24,067 DEBUG [RS:0;2907d75fbb3e:42829 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2907d75fbb3e,42829,1733787983420 2024-12-09T23:46:24,067 DEBUG [RS:0;2907d75fbb3e:42829 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2907d75fbb3e,42829,1733787983420' 2024-12-09T23:46:24,067 DEBUG [RS:0;2907d75fbb3e:42829 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T23:46:24,068 DEBUG [RS:0;2907d75fbb3e:42829 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T23:46:24,068 DEBUG [RS:0;2907d75fbb3e:42829 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T23:46:24,068 INFO [RS:0;2907d75fbb3e:42829 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T23:46:24,068 INFO [RS:0;2907d75fbb3e:42829 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T23:46:24,070 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T23:46:24,070 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(1482): Serving as 2907d75fbb3e,44271,1733787983451, RpcServer on 2907d75fbb3e/172.17.0.2:44271, sessionid=0x1000d06dc2e0002 2024-12-09T23:46:24,070 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T23:46:24,070 DEBUG [RS:1;2907d75fbb3e:44271 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2907d75fbb3e,44271,1733787983451 2024-12-09T23:46:24,070 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2907d75fbb3e,44271,1733787983451' 2024-12-09T23:46:24,070 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T23:46:24,071 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T23:46:24,072 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T23:46:24,072 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T23:46:24,072 DEBUG [RS:1;2907d75fbb3e:44271 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2907d75fbb3e,44271,1733787983451 2024-12-09T23:46:24,072 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2907d75fbb3e,44271,1733787983451' 2024-12-09T23:46:24,072 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T23:46:24,073 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T23:46:24,073 DEBUG [RS:1;2907d75fbb3e:44271 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T23:46:24,073 INFO [RS:1;2907d75fbb3e:44271 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T23:46:24,073 INFO [RS:1;2907d75fbb3e:44271 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T23:46:24,081 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T23:46:24,082 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.HRegionServer(1482): Serving as 2907d75fbb3e,41843,1733787983487, RpcServer on 2907d75fbb3e/172.17.0.2:41843, sessionid=0x1000d06dc2e0003 2024-12-09T23:46:24,082 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T23:46:24,082 DEBUG [RS:2;2907d75fbb3e:41843 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2907d75fbb3e,41843,1733787983487 2024-12-09T23:46:24,082 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2907d75fbb3e,41843,1733787983487' 2024-12-09T23:46:24,082 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T23:46:24,083 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T23:46:24,083 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T23:46:24,083 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T23:46:24,083 DEBUG [RS:2;2907d75fbb3e:41843 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2907d75fbb3e,41843,1733787983487 2024-12-09T23:46:24,083 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2907d75fbb3e,41843,1733787983487' 2024-12-09T23:46:24,083 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T23:46:24,084 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T23:46:24,085 DEBUG [RS:2;2907d75fbb3e:41843 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T23:46:24,085 INFO [RS:2;2907d75fbb3e:41843 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T23:46:24,085 INFO [RS:2;2907d75fbb3e:41843 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-09T23:46:24,172 INFO [RS:0;2907d75fbb3e:42829 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2907d75fbb3e%2C42829%2C1733787983420, suffix=, logDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/WALs/2907d75fbb3e,42829,1733787983420, archiveDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/oldWALs, maxLogs=32 2024-12-09T23:46:24,176 INFO [RS:0;2907d75fbb3e:42829 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2907d75fbb3e%2C42829%2C1733787983420.1733787984176 2024-12-09T23:46:24,178 INFO [RS:1;2907d75fbb3e:44271 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2907d75fbb3e%2C44271%2C1733787983451, suffix=, logDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/WALs/2907d75fbb3e,44271,1733787983451, archiveDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/oldWALs, maxLogs=32 2024-12-09T23:46:24,180 INFO [RS:1;2907d75fbb3e:44271 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2907d75fbb3e%2C44271%2C1733787983451.1733787984180 2024-12-09T23:46:24,185 INFO [RS:0;2907d75fbb3e:42829 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/WALs/2907d75fbb3e,42829,1733787983420/2907d75fbb3e%2C42829%2C1733787983420.1733787984176 2024-12-09T23:46:24,186 DEBUG [RS:0;2907d75fbb3e:42829 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46815:46815),(127.0.0.1/127.0.0.1:46227:46227),(127.0.0.1/127.0.0.1:37341:37341)] 2024-12-09T23:46:24,187 INFO [RS:2;2907d75fbb3e:41843 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2907d75fbb3e%2C41843%2C1733787983487, suffix=, logDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/WALs/2907d75fbb3e,41843,1733787983487, archiveDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/oldWALs, maxLogs=32 2024-12-09T23:46:24,188 INFO [RS:1;2907d75fbb3e:44271 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/WALs/2907d75fbb3e,44271,1733787983451/2907d75fbb3e%2C44271%2C1733787983451.1733787984180 2024-12-09T23:46:24,188 INFO [RS:2;2907d75fbb3e:41843 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2907d75fbb3e%2C41843%2C1733787983487.1733787984188 2024-12-09T23:46:24,189 DEBUG [RS:1;2907d75fbb3e:44271 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46815:46815),(127.0.0.1/127.0.0.1:37341:37341),(127.0.0.1/127.0.0.1:46227:46227)] 2024-12-09T23:46:24,195 INFO [RS:2;2907d75fbb3e:41843 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/WALs/2907d75fbb3e,41843,1733787983487/2907d75fbb3e%2C41843%2C1733787983487.1733787984188 2024-12-09T23:46:24,199 DEBUG [RS:2;2907d75fbb3e:41843 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46227:46227),(127.0.0.1/127.0.0.1:37341:37341),(127.0.0.1/127.0.0.1:46815:46815)] 2024-12-09T23:46:24,294 DEBUG [2907d75fbb3e:34049 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T23:46:24,294 DEBUG [2907d75fbb3e:34049 {}] balancer.BalancerClusterState(204): Hosts are {2907d75fbb3e=0} racks are {/default-rack=0} 2024-12-09T23:46:24,299 DEBUG [2907d75fbb3e:34049 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T23:46:24,299 DEBUG [2907d75fbb3e:34049 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T23:46:24,299 DEBUG [2907d75fbb3e:34049 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T23:46:24,299 DEBUG [2907d75fbb3e:34049 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T23:46:24,299 DEBUG [2907d75fbb3e:34049 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T23:46:24,299 DEBUG [2907d75fbb3e:34049 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T23:46:24,299 INFO [2907d75fbb3e:34049 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T23:46:24,299 INFO [2907d75fbb3e:34049 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T23:46:24,299 INFO [2907d75fbb3e:34049 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T23:46:24,299 DEBUG [2907d75fbb3e:34049 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T23:46:24,300 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2907d75fbb3e,44271,1733787983451 2024-12-09T23:46:24,302 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2907d75fbb3e,44271,1733787983451, state=OPENING 2024-12-09T23:46:24,313 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T23:46:24,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:24,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:24,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:24,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:24,324 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T23:46:24,324 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:24,324 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:24,325 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:24,325 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, 
hasLock=false; OpenRegionProcedure 1588230740, server=2907d75fbb3e,44271,1733787983451}] 2024-12-09T23:46:24,325 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:24,481 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T23:46:24,485 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47615, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T23:46:24,492 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T23:46:24,493 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T23:46:24,496 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2907d75fbb3e%2C44271%2C1733787983451.meta, suffix=.meta, logDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/WALs/2907d75fbb3e,44271,1733787983451, archiveDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/oldWALs, maxLogs=32 2024-12-09T23:46:24,497 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2907d75fbb3e%2C44271%2C1733787983451.meta.1733787984496.meta 2024-12-09T23:46:24,505 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/WALs/2907d75fbb3e,44271,1733787983451/2907d75fbb3e%2C44271%2C1733787983451.meta.1733787984496.meta 2024-12-09T23:46:24,506 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46227:46227),(127.0.0.1/127.0.0.1:46815:46815),(127.0.0.1/127.0.0.1:37341:37341)] 2024-12-09T23:46:24,507 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T23:46:24,507 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T23:46:24,508 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T23:46:24,508 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T23:46:24,508 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T23:46:24,508 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T23:46:24,508 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T23:46:24,508 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T23:46:24,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T23:46:24,511 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T23:46:24,511 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:24,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:24,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T23:46:24,512 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T23:46:24,513 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:24,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:24,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T23:46:24,514 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T23:46:24,514 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:24,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T23:46:24,515 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T23:46:24,516 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T23:46:24,516 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:24,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T23:46:24,516 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T23:46:24,517 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740 2024-12-09T23:46:24,519 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740 2024-12-09T23:46:24,521 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T23:46:24,521 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T23:46:24,522 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T23:46:24,524 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T23:46:24,525 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73433683, jitterRate=0.09424714744091034}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T23:46:24,525 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T23:46:24,527 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733787984508Writing region info on filesystem at 1733787984508Initializing all the Stores at 1733787984509 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787984509Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787984510 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733787984510Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733787984510Cleaning up temporary data from old regions at 1733787984521 (+11 ms)Running coprocessor post-open hooks at 1733787984525 (+4 ms)Region opened successfully at 1733787984526 (+1 ms) 2024-12-09T23:46:24,528 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733787984481 2024-12-09T23:46:24,531 DEBUG [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T23:46:24,532 INFO [RS_OPEN_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T23:46:24,532 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2907d75fbb3e,44271,1733787983451 2024-12-09T23:46:24,534 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2907d75fbb3e,44271,1733787983451, state=OPEN 2024-12-09T23:46:24,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T23:46:24,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T23:46:24,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T23:46:24,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T23:46:24,555 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2907d75fbb3e,44271,1733787983451 2024-12-09T23:46:24,555 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:24,555 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:24,555 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:24,555 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T23:46:24,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T23:46:24,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2907d75fbb3e,44271,1733787983451 in 230 msec 2024-12-09T23:46:24,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T23:46:24,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 673 msec 2024-12-09T23:46:24,569 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T23:46:24,569 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T23:46:24,570 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T23:46:24,571 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2907d75fbb3e,44271,1733787983451, seqNum=-1] 2024-12-09T23:46:24,571 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T23:46:24,573 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39449, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T23:46:24,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 743 msec 2024-12-09T23:46:24,582 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733787984582, completionTime=-1 2024-12-09T23:46:24,582 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T23:46:24,582 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
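The entries above show InitMetaProcedure fetching the hbase:meta region location from the connection registry before creating the 'default' and 'hbase' namespaces. The same lookup can be made from client code; a minimal sketch, assuming an HBase 2.x+ client on the classpath and a reachable cluster (the quorum host/port are illustrative, taken from the minicluster log, and would normally come from hbase-site.xml):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationLookup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative only: the minicluster's ZooKeeper runs on an ephemeral port (51082 in this log).
    conf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 51082);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Resolves hbase:meta,,1.1588230740 to its hosting region server,
      // the same answer the PEWorker gets from the connection registry above.
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta is on " + loc.getServerName());
    }
  }
}
```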
2024-12-09T23:46:24,584 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T23:46:24,584 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733788044584 2024-12-09T23:46:24,585 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733788104584 2024-12-09T23:46:24,585 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T23:46:24,585 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,34049,1733787983274-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,585 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,34049,1733787983274-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,585 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,34049,1733787983274-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,585 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2907d75fbb3e:34049, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,585 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,585 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,588 DEBUG [master/2907d75fbb3e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T23:46:24,591 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.034sec 2024-12-09T23:46:24,592 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T23:46:24,592 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T23:46:24,592 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T23:46:24,592 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T23:46:24,592 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T23:46:24,592 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,34049,1733787983274-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
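The master start-up above schedules several ScheduledChores (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, ...) on its ChoreService. A minimal sketch of that pattern with the same classes; the chore name, 5-second period, and the trivial Stoppable are illustrative and not the master's own wiring:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore has a stopper to consult.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");
    // chore() runs every 5 s, analogous to the master chores above
    // (BalancerChore period=300000 ms, CatalogJanitor period=300000 ms, ...).
    ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 5000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };
    service.scheduleChore(chore);
    Thread.sleep(12_000);
    stopper.stop("done");
    service.shutdown();
  }
}
```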
2024-12-09T23:46:24,592 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,34049,1733787983274-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T23:46:24,595 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T23:46:24,595 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T23:46:24,595 INFO [master/2907d75fbb3e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2907d75fbb3e,34049,1733787983274-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T23:46:24,618 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23d6b22d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T23:46:24,618 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2907d75fbb3e,34049,-1 for getting cluster id 2024-12-09T23:46:24,618 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T23:46:24,620 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dc0f44c0-a9f7-450f-9452-98a8e23a9892' 2024-12-09T23:46:24,620 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T23:46:24,620 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dc0f44c0-a9f7-450f-9452-98a8e23a9892" 2024-12-09T23:46:24,621 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f701087, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T23:46:24,621 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2907d75fbb3e,34049,-1] 2024-12-09T23:46:24,621 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T23:46:24,621 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:24,622 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35070, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T23:46:24,624 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62359306, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T23:46:24,624 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T23:46:24,625 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=2907d75fbb3e,44271,1733787983451, seqNum=-1] 2024-12-09T23:46:24,625 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T23:46:24,627 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57624, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T23:46:24,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2907d75fbb3e,34049,1733787983274 2024-12-09T23:46:24,630 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T23:46:24,631 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 2907d75fbb3e,34049,1733787983274 2024-12-09T23:46:24,631 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7c1d1764 2024-12-09T23:46:24,631 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T23:46:24,633 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35086, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T23:46:24,634 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T23:46:24,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34049 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-09T23:46:24,637 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T23:46:24,638 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:24,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-09T23:46:24,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T23:46:24,639 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T23:46:24,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741837_1013 (size=392) 
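The RPC above is the server side of `create 'TestHBaseWalOnEC', {REGION_REPLICATION => '1'}, {NAME => 'cf', VERSIONS => '1', ...}`. A hedged sketch of issuing the equivalent request with the Java Admin API, using only the column-family settings visible in the log; the Connection is assumed to be already open:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  // Builds and creates the TestHBaseWalOnEC table as described in the log.
  static void createTable(Connection conn) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
        .setRegionReplication(1)                       // REGION_REPLICATION => '1'
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))           // NAME => 'cf'
            .setMaxVersions(1)                         // VERSIONS => '1'
            .build())
        .build();
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(desc);  // drives the CreateTableProcedure (pid=4 above)
    }
  }
}
```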
2024-12-09T23:46:24,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741837_1013 (size=392) 2024-12-09T23:46:24,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741837_1013 (size=392) 2024-12-09T23:46:24,736 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c315fb59b265744b57d79b1541171bea, NAME => 'TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a 2024-12-09T23:46:24,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741838_1014 (size=51) 2024-12-09T23:46:24,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741838_1014 (size=51) 2024-12-09T23:46:24,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741838_1014 (size=51) 2024-12-09T23:46:24,747 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T23:46:24,747 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing c315fb59b265744b57d79b1541171bea, disabling compactions & flushes 2024-12-09T23:46:24,747 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 2024-12-09T23:46:24,747 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 2024-12-09T23:46:24,748 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. after waiting 0 ms 2024-12-09T23:46:24,748 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 2024-12-09T23:46:24,748 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 
2024-12-09T23:46:24,748 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for c315fb59b265744b57d79b1541171bea: Waiting for close lock at 1733787984747Disabling compacts and flushes for region at 1733787984747Disabling writes for close at 1733787984748 (+1 ms)Writing region close event to WAL at 1733787984748Closed at 1733787984748 2024-12-09T23:46:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T23:46:24,750 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T23:46:24,750 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733787984750"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733787984750"}]},"ts":"1733787984750"} 2024-12-09T23:46:24,754 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T23:46:24,755 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T23:46:24,756 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733787984756"}]},"ts":"1733787984756"} 2024-12-09T23:46:24,759 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-09T23:46:24,759 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2907d75fbb3e=0} racks are {/default-rack=0} 2024-12-09T23:46:24,760 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T23:46:24,760 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T23:46:24,760 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T23:46:24,760 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T23:46:24,760 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T23:46:24,760 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T23:46:24,760 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T23:46:24,760 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T23:46:24,760 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T23:46:24,761 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T23:46:24,761 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c315fb59b265744b57d79b1541171bea, ASSIGN}] 2024-12-09T23:46:24,763 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c315fb59b265744b57d79b1541171bea, ASSIGN 2024-12-09T23:46:24,764 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c315fb59b265744b57d79b1541171bea, ASSIGN; state=OFFLINE, location=2907d75fbb3e,42829,1733787983420; forceNewPlan=false, retain=false 2024-12-09T23:46:24,915 INFO [2907d75fbb3e:34049 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T23:46:24,916 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c315fb59b265744b57d79b1541171bea, regionState=OPENING, regionLocation=2907d75fbb3e,42829,1733787983420 2024-12-09T23:46:24,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c315fb59b265744b57d79b1541171bea, ASSIGN because future has completed 2024-12-09T23:46:24,922 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c315fb59b265744b57d79b1541171bea, server=2907d75fbb3e,42829,1733787983420}] 2024-12-09T23:46:24,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T23:46:25,076 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T23:46:25,079 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59833, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T23:46:25,087 INFO [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 
2024-12-09T23:46:25,087 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c315fb59b265744b57d79b1541171bea, NAME => 'TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea.', STARTKEY => '', ENDKEY => ''} 2024-12-09T23:46:25,087 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC c315fb59b265744b57d79b1541171bea 2024-12-09T23:46:25,087 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T23:46:25,087 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c315fb59b265744b57d79b1541171bea 2024-12-09T23:46:25,087 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c315fb59b265744b57d79b1541171bea 2024-12-09T23:46:25,089 INFO [StoreOpener-c315fb59b265744b57d79b1541171bea-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c315fb59b265744b57d79b1541171bea 2024-12-09T23:46:25,090 INFO [StoreOpener-c315fb59b265744b57d79b1541171bea-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c315fb59b265744b57d79b1541171bea columnFamilyName cf 2024-12-09T23:46:25,091 DEBUG [StoreOpener-c315fb59b265744b57d79b1541171bea-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T23:46:25,091 INFO [StoreOpener-c315fb59b265744b57d79b1541171bea-1 {}] regionserver.HStore(327): Store=c315fb59b265744b57d79b1541171bea/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T23:46:25,091 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c315fb59b265744b57d79b1541171bea 2024-12-09T23:46:25,092 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/default/TestHBaseWalOnEC/c315fb59b265744b57d79b1541171bea 2024-12-09T23:46:25,093 DEBUG 
[RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/default/TestHBaseWalOnEC/c315fb59b265744b57d79b1541171bea 2024-12-09T23:46:25,093 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c315fb59b265744b57d79b1541171bea 2024-12-09T23:46:25,093 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c315fb59b265744b57d79b1541171bea 2024-12-09T23:46:25,095 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c315fb59b265744b57d79b1541171bea 2024-12-09T23:46:25,097 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/default/TestHBaseWalOnEC/c315fb59b265744b57d79b1541171bea/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T23:46:25,098 INFO [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c315fb59b265744b57d79b1541171bea; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74484291, jitterRate=0.10990242660045624}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T23:46:25,098 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c315fb59b265744b57d79b1541171bea 2024-12-09T23:46:25,099 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c315fb59b265744b57d79b1541171bea: Running coprocessor pre-open hook at 1733787985087Writing region info on filesystem at 1733787985087Initializing all the Stores at 1733787985089 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733787985089Cleaning up temporary data from old regions at 1733787985093 (+4 ms)Running coprocessor post-open hooks at 1733787985098 (+5 ms)Region opened successfully at 1733787985099 (+1 ms) 2024-12-09T23:46:25,100 INFO [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea., pid=6, masterSystemTime=1733787985076 2024-12-09T23:46:25,103 DEBUG [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 2024-12-09T23:46:25,103 INFO [RS_OPEN_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 
2024-12-09T23:46:25,104 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c315fb59b265744b57d79b1541171bea, regionState=OPEN, openSeqNum=2, regionLocation=2907d75fbb3e,42829,1733787983420 2024-12-09T23:46:25,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c315fb59b265744b57d79b1541171bea, server=2907d75fbb3e,42829,1733787983420 because future has completed 2024-12-09T23:46:25,113 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T23:46:25,115 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c315fb59b265744b57d79b1541171bea, server=2907d75fbb3e,42829,1733787983420 in 187 msec 2024-12-09T23:46:25,117 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T23:46:25,117 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c315fb59b265744b57d79b1541171bea, ASSIGN in 352 msec 2024-12-09T23:46:25,118 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T23:46:25,119 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733787985118"}]},"ts":"1733787985118"} 2024-12-09T23:46:25,122 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-09T23:46:25,123 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T23:46:25,127 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 490 msec 2024-12-09T23:46:25,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T23:46:25,269 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T23:46:25,269 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-09T23:46:25,270 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T23:46:25,274 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-09T23:46:25,274 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T23:46:25,274 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
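After the CreateTableProcedure finishes, the test utility blocks until every region of TestHBaseWalOnEC is assigned (60 s default timeout, as logged). In test code that is a single call; a sketch assuming HBaseTestingUtil keeps the waitUntilAllRegionsAssigned method familiar from HBaseTestingUtility, with util being the test's already-started minicluster helper:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;

public class WaitForAssignment {
  // Blocks until hbase:meta reports every region of the table as assigned,
  // producing the "Waiting until all regions ... get assigned. Timeout = 60000ms" entries above.
  static void waitForTable(HBaseTestingUtil util) throws Exception {
    util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"));
  }
}
```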
2024-12-09T23:46:25,278 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea., hostname=2907d75fbb3e,42829,1733787983420, seqNum=2] 2024-12-09T23:46:25,279 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T23:46:25,281 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35032, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T23:46:25,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34049 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-09T23:46:25,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34049 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-09T23:46:25,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T23:46:25,288 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-09T23:46:25,289 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T23:46:25,289 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T23:46:25,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T23:46:25,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42829 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T23:46:25,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 
2024-12-09T23:46:25,444 INFO [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing c315fb59b265744b57d79b1541171bea 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-09T23:46:25,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/default/TestHBaseWalOnEC/c315fb59b265744b57d79b1541171bea/.tmp/cf/5f0520497df44d11ae71636bd7b75156 is 36, key is row/cf:cq/1733787985282/Put/seqid=0 2024-12-09T23:46:25,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741839_1015 (size=4787) 2024-12-09T23:46:25,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741839_1015 (size=4787) 2024-12-09T23:46:25,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741839_1015 (size=4787) 2024-12-09T23:46:25,471 INFO [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/default/TestHBaseWalOnEC/c315fb59b265744b57d79b1541171bea/.tmp/cf/5f0520497df44d11ae71636bd7b75156 2024-12-09T23:46:25,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/default/TestHBaseWalOnEC/c315fb59b265744b57d79b1541171bea/.tmp/cf/5f0520497df44d11ae71636bd7b75156 as hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/default/TestHBaseWalOnEC/c315fb59b265744b57d79b1541171bea/cf/5f0520497df44d11ae71636bd7b75156 2024-12-09T23:46:25,490 INFO [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/default/TestHBaseWalOnEC/c315fb59b265744b57d79b1541171bea/cf/5f0520497df44d11ae71636bd7b75156, entries=1, sequenceid=5, filesize=4.7 K 2024-12-09T23:46:25,492 INFO [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for c315fb59b265744b57d79b1541171bea in 48ms, sequenceid=5, compaction requested=false 2024-12-09T23:46:25,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for c315fb59b265744b57d79b1541171bea: 2024-12-09T23:46:25,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 
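The flush above persists a single 32-byte cell (row/cf:cq) from the memstore into the HFile 5f0520497df44d11ae71636bd7b75156. A sketch of the client calls that would produce it; the cell value is illustrative and conn is an open Connection:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlush {
  static void putAndFlush(Connection conn) throws Exception {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
      // Writes row/cf:cq, the cell visible in the HFileWriterImpl entry above.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Asks the master to flush the table, i.e. the FlushTableProcedure (pid=7/8 above).
      admin.flush(name);
    }
  }
}
```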
2024-12-09T23:46:25,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2907d75fbb3e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T23:46:25,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34049 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T23:46:25,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T23:46:25,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 206 msec 2024-12-09T23:46:25,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 215 msec 2024-12-09T23:46:25,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T23:46:25,609 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T23:46:25,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T23:46:25,613 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T23:46:25,613 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T23:46:25,613 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:25,614 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:25,614 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T23:46:25,614 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T23:46:25,614 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1163089687, stopped=false 2024-12-09T23:46:25,614 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2907d75fbb3e,34049,1733787983274 2024-12-09T23:46:25,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:25,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:25,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:25,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T23:46:25,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:25,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, 
quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:25,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:25,673 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T23:46:25,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:25,673 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T23:46:25,674 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T23:46:25,674 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:25,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:25,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:25,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:25,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T23:46:25,674 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2907d75fbb3e,42829,1733787983420' ***** 2024-12-09T23:46:25,674 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T23:46:25,675 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2907d75fbb3e,44271,1733787983451' ***** 2024-12-09T23:46:25,675 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T23:46:25,675 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T23:46:25,675 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T23:46:25,675 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T23:46:25,675 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T23:46:25,675 INFO [RS:0;2907d75fbb3e:42829 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T23:46:25,675 INFO [RS:1;2907d75fbb3e:44271 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T23:46:25,675 INFO [RS:0;2907d75fbb3e:42829 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
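The shutdown begins with the deletion of the /hbase/running znode; every ZKWatcher receives NodeDeleted and then re-sets a watch on the now-absent path ("Set watcher on znode that does not yet exist"). HBase's ZKWatcher/ZKUtil are internal classes, but the underlying mechanism is a plain ZooKeeper existence watch; a minimal sketch with the stock ZooKeeper client (connect string and timeout are illustrative):

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningNodeWatch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = new Watcher() {
      @Override public void process(WatchedEvent event) {
        // Fires for NodeCreated/NodeDeleted on the watched path,
        // analogous to the NodeDeleted events on /hbase/running above.
        System.out.println(event.getType() + " on " + event.getPath());
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51082", 30_000, watcher);
    // exists() sets a one-shot watch whether or not the znode is present,
    // which is what "Set watcher on znode that does not yet exist" refers to.
    zk.exists("/hbase/running", true);
    Thread.sleep(60_000);
    zk.close();
  }
}
```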
2024-12-09T23:46:25,675 INFO [RS:1;2907d75fbb3e:44271 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T23:46:25,675 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(959): stopping server 2907d75fbb3e,44271,1733787983451 2024-12-09T23:46:25,675 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2907d75fbb3e,41843,1733787983487' ***** 2024-12-09T23:46:25,675 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(3091): Received CLOSE for c315fb59b265744b57d79b1541171bea 2024-12-09T23:46:25,675 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T23:46:25,675 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T23:46:25,675 INFO [RS:1;2907d75fbb3e:44271 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2907d75fbb3e:44271. 2024-12-09T23:46:25,675 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T23:46:25,675 DEBUG [RS:1;2907d75fbb3e:44271 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T23:46:25,675 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(959): stopping server 2907d75fbb3e,42829,1733787983420 2024-12-09T23:46:25,675 DEBUG [RS:1;2907d75fbb3e:44271 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:25,675 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T23:46:25,675 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T23:46:25,676 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T23:46:25,676 INFO [RS:0;2907d75fbb3e:42829 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2907d75fbb3e:42829. 2024-12-09T23:46:25,676 INFO [RS:2;2907d75fbb3e:41843 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T23:46:25,676 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-09T23:46:25,676 DEBUG [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c315fb59b265744b57d79b1541171bea, disabling compactions & flushes 2024-12-09T23:46:25,676 INFO [RS:2;2907d75fbb3e:41843 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T23:46:25,676 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T23:46:25,676 DEBUG [RS:0;2907d75fbb3e:42829 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T23:46:25,676 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.HRegionServer(959): stopping server 2907d75fbb3e,41843,1733787983487 2024-12-09T23:46:25,676 INFO [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 2024-12-09T23:46:25,676 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T23:46:25,676 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T23:46:25,676 DEBUG [RS:0;2907d75fbb3e:42829 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:25,676 DEBUG [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 2024-12-09T23:46:25,676 INFO [RS:2;2907d75fbb3e:41843 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;2907d75fbb3e:41843. 2024-12-09T23:46:25,676 DEBUG [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 
after waiting 0 ms 2024-12-09T23:46:25,676 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T23:46:25,676 DEBUG [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 2024-12-09T23:46:25,676 DEBUG [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(1325): Online Regions={c315fb59b265744b57d79b1541171bea=TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea.} 2024-12-09T23:46:25,676 DEBUG [RS:2;2907d75fbb3e:41843 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T23:46:25,676 DEBUG [RS:2;2907d75fbb3e:41843 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:25,676 DEBUG [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(1351): Waiting on c315fb59b265744b57d79b1541171bea 2024-12-09T23:46:25,676 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.HRegionServer(976): stopping server 2907d75fbb3e,41843,1733787983487; all regions closed. 
2024-12-09T23:46:25,676 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T23:46:25,676 DEBUG [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T23:46:25,677 DEBUG [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T23:46:25,677 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T23:46:25,677 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T23:46:25,677 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T23:46:25,677 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T23:46:25,677 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T23:46:25,677 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-09T23:46:25,678 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,679 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,679 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,679 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,679 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741835_1011 (size=93) 2024-12-09T23:46:25,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741835_1011 (size=93) 2024-12-09T23:46:25,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741835_1011 (size=93) 2024-12-09T23:46:25,686 DEBUG [RS:2;2907d75fbb3e:41843 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/oldWALs 2024-12-09T23:46:25,686 INFO [RS:2;2907d75fbb3e:41843 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2907d75fbb3e%2C41843%2C1733787983487:(num 1733787984188) 2024-12-09T23:46:25,686 DEBUG [RS:2;2907d75fbb3e:41843 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:25,686 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T23:46:25,686 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T23:46:25,687 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.ChoreService(370): Chore service for: regionserver/2907d75fbb3e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T23:46:25,687 DEBUG 
[RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/default/TestHBaseWalOnEC/c315fb59b265744b57d79b1541171bea/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T23:46:25,687 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T23:46:25,687 INFO [regionserver/2907d75fbb3e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T23:46:25,687 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T23:46:25,687 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T23:46:25,687 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T23:46:25,687 INFO [RS:2;2907d75fbb3e:41843 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41843 2024-12-09T23:46:25,688 INFO [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 2024-12-09T23:46:25,689 DEBUG [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c315fb59b265744b57d79b1541171bea: Waiting for close lock at 1733787985675Running coprocessor pre-close hooks at 1733787985676 (+1 ms)Disabling compacts and flushes for region at 1733787985676Disabling writes for close at 1733787985676Writing region close event to WAL at 1733787985679 (+3 ms)Running coprocessor post-close hooks at 1733787985688 (+9 ms)Closed at 1733787985688 2024-12-09T23:46:25,689 DEBUG [RS_CLOSE_REGION-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea. 
2024-12-09T23:46:25,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T23:46:25,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2907d75fbb3e,41843,1733787983487 2024-12-09T23:46:25,698 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T23:46:25,699 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2907d75fbb3e,41843,1733787983487] 2024-12-09T23:46:25,702 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/.tmp/info/dde76ff1bbbe4615a22f9f443859414d is 153, key is TestHBaseWalOnEC,,1733787984633.c315fb59b265744b57d79b1541171bea./info:regioninfo/1733787985104/Put/seqid=0 2024-12-09T23:46:25,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741840_1016 (size=6637) 2024-12-09T23:46:25,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741840_1016 (size=6637) 2024-12-09T23:46:25,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741840_1016 (size=6637) 2024-12-09T23:46:25,712 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/.tmp/info/dde76ff1bbbe4615a22f9f443859414d 2024-12-09T23:46:25,714 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2907d75fbb3e,41843,1733787983487 already deleted, retry=false 2024-12-09T23:46:25,714 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2907d75fbb3e,41843,1733787983487 expired; onlineServers=2 2024-12-09T23:46:25,731 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/.tmp/ns/defa3d4a08bb469a8b5ff1ede23e8468 is 43, key is default/ns:d/1733787984573/Put/seqid=0 2024-12-09T23:46:25,736 INFO [regionserver/2907d75fbb3e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T23:46:25,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741841_1017 (size=5153) 2024-12-09T23:46:25,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741841_1017 (size=5153) 2024-12-09T23:46:25,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741841_1017 (size=5153) 2024-12-09T23:46:25,739 INFO 
[RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/.tmp/ns/defa3d4a08bb469a8b5ff1ede23e8468 2024-12-09T23:46:25,741 INFO [regionserver/2907d75fbb3e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T23:46:25,749 INFO [regionserver/2907d75fbb3e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T23:46:25,766 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/.tmp/table/1ed7519564f0491e95fb4e66ab0b2493 is 52, key is TestHBaseWalOnEC/table:state/1733787985118/Put/seqid=0 2024-12-09T23:46:25,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741842_1018 (size=5249) 2024-12-09T23:46:25,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741842_1018 (size=5249) 2024-12-09T23:46:25,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741842_1018 (size=5249) 2024-12-09T23:46:25,774 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/.tmp/table/1ed7519564f0491e95fb4e66ab0b2493 2024-12-09T23:46:25,782 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/.tmp/info/dde76ff1bbbe4615a22f9f443859414d as hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/info/dde76ff1bbbe4615a22f9f443859414d 2024-12-09T23:46:25,790 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/info/dde76ff1bbbe4615a22f9f443859414d, entries=10, sequenceid=11, filesize=6.5 K 2024-12-09T23:46:25,791 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/.tmp/ns/defa3d4a08bb469a8b5ff1ede23e8468 as hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/ns/defa3d4a08bb469a8b5ff1ede23e8468 2024-12-09T23:46:25,799 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/ns/defa3d4a08bb469a8b5ff1ede23e8468, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T23:46:25,801 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/.tmp/table/1ed7519564f0491e95fb4e66ab0b2493 as hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/table/1ed7519564f0491e95fb4e66ab0b2493 2024-12-09T23:46:25,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:25,806 INFO [RS:2;2907d75fbb3e:41843 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T23:46:25,806 INFO [RS:2;2907d75fbb3e:41843 {}] regionserver.HRegionServer(1031): Exiting; stopping=2907d75fbb3e,41843,1733787983487; zookeeper connection closed. 2024-12-09T23:46:25,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41843-0x1000d06dc2e0003, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:25,807 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7c62b598 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7c62b598 2024-12-09T23:46:25,810 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/table/1ed7519564f0491e95fb4e66ab0b2493, entries=2, sequenceid=11, filesize=5.1 K 2024-12-09T23:46:25,812 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false 2024-12-09T23:46:25,818 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T23:46:25,819 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T23:46:25,819 INFO [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T23:46:25,819 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733787985676Running coprocessor pre-close hooks at 1733787985676Disabling compacts and flushes for region at 1733787985676Disabling writes for close at 1733787985677 (+1 ms)Obtaining lock to block concurrent updates at 1733787985677Preparing flush snapshotting stores in 1588230740 at 1733787985677Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733787985677Flushing stores of hbase:meta,,1.1588230740 at 1733787985678 (+1 ms)Flushing 1588230740/info: creating writer at 1733787985678Flushing 1588230740/info: appending metadata at 1733787985702 (+24 ms)Flushing 
1588230740/info: closing flushed file at 1733787985702Flushing 1588230740/ns: creating writer at 1733787985718 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733787985731 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733787985731Flushing 1588230740/table: creating writer at 1733787985746 (+15 ms)Flushing 1588230740/table: appending metadata at 1733787985765 (+19 ms)Flushing 1588230740/table: closing flushed file at 1733787985765Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@647de600: reopening flushed file at 1733787985780 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@95a62c4: reopening flushed file at 1733787985790 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c1d6e29: reopening flushed file at 1733787985799 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false at 1733787985812 (+13 ms)Writing region close event to WAL at 1733787985813 (+1 ms)Running coprocessor post-close hooks at 1733787985819 (+6 ms)Closed at 1733787985819 2024-12-09T23:46:25,819 DEBUG [RS_CLOSE_META-regionserver/2907d75fbb3e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T23:46:25,835 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T23:46:25,835 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T23:46:25,876 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(976): stopping server 2907d75fbb3e,42829,1733787983420; all regions closed. 2024-12-09T23:46:25,877 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(976): stopping server 2907d75fbb3e,44271,1733787983451; all regions closed. 
2024-12-09T23:46:25,877 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,877 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,877 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,877 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,877 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,877 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,878 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,878 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,878 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,878 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741833_1009 (size=1298) 2024-12-09T23:46:25,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741833_1009 (size=1298) 2024-12-09T23:46:25,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741836_1012 (size=2751) 2024-12-09T23:46:25,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741836_1012 (size=2751) 2024-12-09T23:46:25,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741836_1012 (size=2751) 2024-12-09T23:46:25,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741833_1009 (size=1298) 2024-12-09T23:46:25,884 DEBUG [RS:1;2907d75fbb3e:44271 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/oldWALs 2024-12-09T23:46:25,884 INFO [RS:1;2907d75fbb3e:44271 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2907d75fbb3e%2C44271%2C1733787983451.meta:.meta(num 1733787984496) 2024-12-09T23:46:25,885 DEBUG [RS:0;2907d75fbb3e:42829 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/oldWALs 2024-12-09T23:46:25,885 INFO [RS:0;2907d75fbb3e:42829 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2907d75fbb3e%2C42829%2C1733787983420:(num 1733787984176) 2024-12-09T23:46:25,885 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,885 DEBUG [RS:0;2907d75fbb3e:42829 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:25,885 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T23:46:25,885 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,885 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,885 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T23:46:25,885 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,885 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:25,885 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.ChoreService(370): Chore service for: regionserver/2907d75fbb3e:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, 
period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T23:46:25,885 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T23:46:25,885 INFO [regionserver/2907d75fbb3e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T23:46:25,885 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T23:46:25,886 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T23:46:25,886 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T23:46:25,886 INFO [RS:0;2907d75fbb3e:42829 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42829 2024-12-09T23:46:25,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741834_1010 (size=93) 2024-12-09T23:46:25,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741834_1010 (size=93) 2024-12-09T23:46:25,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741834_1010 (size=93) 2024-12-09T23:46:25,891 DEBUG [RS:1;2907d75fbb3e:44271 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/oldWALs 2024-12-09T23:46:25,891 INFO [RS:1;2907d75fbb3e:44271 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2907d75fbb3e%2C44271%2C1733787983451:(num 1733787984180) 2024-12-09T23:46:25,891 DEBUG [RS:1;2907d75fbb3e:44271 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T23:46:25,891 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T23:46:25,891 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T23:46:25,891 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.ChoreService(370): Chore service for: regionserver/2907d75fbb3e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T23:46:25,892 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T23:46:25,892 INFO [regionserver/2907d75fbb3e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T23:46:25,892 INFO [RS:1;2907d75fbb3e:44271 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44271 2024-12-09T23:46:25,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2907d75fbb3e,42829,1733787983420 2024-12-09T23:46:25,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T23:46:25,905 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T23:46:25,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2907d75fbb3e,44271,1733787983451 2024-12-09T23:46:25,915 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T23:46:25,923 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2907d75fbb3e,42829,1733787983420] 2024-12-09T23:46:25,939 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2907d75fbb3e,42829,1733787983420 already deleted, retry=false 2024-12-09T23:46:25,940 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2907d75fbb3e,42829,1733787983420 expired; onlineServers=1 2024-12-09T23:46:25,940 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2907d75fbb3e,44271,1733787983451] 2024-12-09T23:46:25,948 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2907d75fbb3e,44271,1733787983451 already deleted, retry=false 2024-12-09T23:46:25,948 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2907d75fbb3e,44271,1733787983451 expired; onlineServers=0 2024-12-09T23:46:25,948 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2907d75fbb3e,34049,1733787983274' ***** 2024-12-09T23:46:25,948 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T23:46:25,948 INFO [M:0;2907d75fbb3e:34049 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T23:46:25,948 INFO [M:0;2907d75fbb3e:34049 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T23:46:25,948 DEBUG [M:0;2907d75fbb3e:34049 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T23:46:25,948 DEBUG [M:0;2907d75fbb3e:34049 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T23:46:25,948 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster-HFileCleaner.large.0-1733787983845 {}] cleaner.HFileCleaner(306): Exit Thread[master/2907d75fbb3e:0:becomeActiveMaster-HFileCleaner.large.0-1733787983845,5,FailOnTimeoutGroup] 2024-12-09T23:46:25,948 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T23:46:25,948 DEBUG [master/2907d75fbb3e:0:becomeActiveMaster-HFileCleaner.small.0-1733787983845 {}] cleaner.HFileCleaner(306): Exit Thread[master/2907d75fbb3e:0:becomeActiveMaster-HFileCleaner.small.0-1733787983845,5,FailOnTimeoutGroup] 2024-12-09T23:46:25,948 INFO [M:0;2907d75fbb3e:34049 {}] hbase.ChoreService(370): Chore service for: master/2907d75fbb3e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T23:46:25,949 INFO [M:0;2907d75fbb3e:34049 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T23:46:25,949 DEBUG [M:0;2907d75fbb3e:34049 {}] master.HMaster(1795): Stopping service threads 2024-12-09T23:46:25,949 INFO [M:0;2907d75fbb3e:34049 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T23:46:25,949 INFO [M:0;2907d75fbb3e:34049 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T23:46:25,949 INFO [M:0;2907d75fbb3e:34049 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T23:46:25,949 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T23:46:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T23:46:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T23:46:25,956 DEBUG [M:0;2907d75fbb3e:34049 {}] zookeeper.ZKUtil(347): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T23:46:25,956 WARN [M:0;2907d75fbb3e:34049 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T23:46:25,957 INFO [M:0;2907d75fbb3e:34049 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/.lastflushedseqids 2024-12-09T23:46:25,959 WARN [IPC Server handler 1 on default port 41235 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T23:46:25,959 WARN [IPC Server handler 1 on default port 41235 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T23:46:25,959 WARN [IPC Server handler 1 on default port 41235 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], 
replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T23:46:25,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741843_1019 (size=127) 2024-12-09T23:46:25,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741843_1019 (size=127) 2024-12-09T23:46:25,967 INFO [M:0;2907d75fbb3e:34049 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T23:46:25,967 INFO [M:0;2907d75fbb3e:34049 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T23:46:25,967 DEBUG [M:0;2907d75fbb3e:34049 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T23:46:25,967 INFO [M:0;2907d75fbb3e:34049 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T23:46:25,967 DEBUG [M:0;2907d75fbb3e:34049 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T23:46:25,967 DEBUG [M:0;2907d75fbb3e:34049 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T23:46:25,967 DEBUG [M:0;2907d75fbb3e:34049 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T23:46:25,967 INFO [M:0;2907d75fbb3e:34049 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-09T23:46:25,983 DEBUG [M:0;2907d75fbb3e:34049 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/38246a1ddee44c78be018f9abb81d66b is 82, key is hbase:meta,,1/info:regioninfo/1733787984532/Put/seqid=0 2024-12-09T23:46:25,985 WARN [IPC Server handler 0 on default port 41235 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T23:46:25,985 WARN [IPC Server handler 0 on default port 41235 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T23:46:25,985 WARN [IPC Server handler 0 on default port 41235 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T23:46:25,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741844_1020 (size=5672) 2024-12-09T23:46:25,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741844_1020 (size=5672) 2024-12-09T23:46:26,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:26,023 INFO [RS:0;2907d75fbb3e:42829 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T23:46:26,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42829-0x1000d06dc2e0001, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:26,023 INFO [RS:0;2907d75fbb3e:42829 {}] regionserver.HRegionServer(1031): Exiting; stopping=2907d75fbb3e,42829,1733787983420; zookeeper connection closed. 
2024-12-09T23:46:26,023 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4a428eba {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4a428eba 2024-12-09T23:46:26,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:26,031 INFO [RS:1;2907d75fbb3e:44271 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T23:46:26,031 INFO [RS:1;2907d75fbb3e:44271 {}] regionserver.HRegionServer(1031): Exiting; stopping=2907d75fbb3e,44271,1733787983451; zookeeper connection closed. 2024-12-09T23:46:26,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44271-0x1000d06dc2e0002, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:26,032 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7d16fa7e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7d16fa7e 2024-12-09T23:46:26,032 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T23:46:26,393 INFO [M:0;2907d75fbb3e:34049 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/38246a1ddee44c78be018f9abb81d66b 2024-12-09T23:46:26,418 DEBUG [M:0;2907d75fbb3e:34049 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d79a63d61e06436e94e7b3c263284742 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733787985126/Put/seqid=0 2024-12-09T23:46:26,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741845_1021 (size=6440) 2024-12-09T23:46:26,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741845_1021 (size=6440) 2024-12-09T23:46:26,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741845_1021 (size=6440) 2024-12-09T23:46:26,426 INFO [M:0;2907d75fbb3e:34049 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d79a63d61e06436e94e7b3c263284742 2024-12-09T23:46:26,448 DEBUG [M:0;2907d75fbb3e:34049 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5c2df34361844c89938513aba2d1a251 is 69, key is 2907d75fbb3e,41843,1733787983487/rs:state/1733787983975/Put/seqid=0 2024-12-09T23:46:26,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741846_1022 (size=5294) 2024-12-09T23:46:26,455 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741846_1022 (size=5294) 2024-12-09T23:46:26,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741846_1022 (size=5294) 2024-12-09T23:46:26,456 INFO [M:0;2907d75fbb3e:34049 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5c2df34361844c89938513aba2d1a251 2024-12-09T23:46:26,463 DEBUG [M:0;2907d75fbb3e:34049 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/38246a1ddee44c78be018f9abb81d66b as hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/38246a1ddee44c78be018f9abb81d66b 2024-12-09T23:46:26,470 INFO [M:0;2907d75fbb3e:34049 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/38246a1ddee44c78be018f9abb81d66b, entries=8, sequenceid=72, filesize=5.5 K 2024-12-09T23:46:26,472 DEBUG [M:0;2907d75fbb3e:34049 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d79a63d61e06436e94e7b3c263284742 as hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d79a63d61e06436e94e7b3c263284742 2024-12-09T23:46:26,478 INFO [M:0;2907d75fbb3e:34049 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d79a63d61e06436e94e7b3c263284742, entries=8, sequenceid=72, filesize=6.3 K 2024-12-09T23:46:26,479 DEBUG [M:0;2907d75fbb3e:34049 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5c2df34361844c89938513aba2d1a251 as hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5c2df34361844c89938513aba2d1a251 2024-12-09T23:46:26,486 INFO [M:0;2907d75fbb3e:34049 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41235/user/jenkins/test-data/3218311d-b9ae-7e67-6809-172703410d7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5c2df34361844c89938513aba2d1a251, entries=3, sequenceid=72, filesize=5.2 K 2024-12-09T23:46:26,488 INFO [M:0;2907d75fbb3e:34049 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 521ms, sequenceid=72, compaction requested=false 2024-12-09T23:46:26,489 INFO [M:0;2907d75fbb3e:34049 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T23:46:26,489 DEBUG [M:0;2907d75fbb3e:34049 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733787985967Disabling compacts and flushes for region at 1733787985967Disabling writes for close at 1733787985967Obtaining lock to block concurrent updates at 1733787985967Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733787985967Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733787985968 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733787985969 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733787985969Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733787985983 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733787985983Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733787986404 (+421 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733787986418 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733787986418Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733787986432 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733787986448 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733787986448Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a34749: reopening flushed file at 1733787986462 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48f7cd8c: reopening flushed file at 1733787986471 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f8ee742: reopening flushed file at 1733787986478 (+7 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 521ms, sequenceid=72, compaction requested=false at 1733787986488 (+10 ms)Writing region close event to WAL at 1733787986489 (+1 ms)Closed at 1733787986489 2024-12-09T23:46:26,490 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:26,490 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:26,490 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:26,490 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:26,490 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T23:46:26,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40249 is added to blk_1073741830_1006 (size=32686) 2024-12-09T23:46:26,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36613 is added to blk_1073741830_1006 (size=32686) 2024-12-09T23:46:26,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39731 is added to blk_1073741830_1006 (size=32686) 2024-12-09T23:46:26,493 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T23:46:26,493 INFO [M:0;2907d75fbb3e:34049 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T23:46:26,493 INFO [M:0;2907d75fbb3e:34049 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34049 2024-12-09T23:46:26,493 INFO [M:0;2907d75fbb3e:34049 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T23:46:26,623 INFO [M:0;2907d75fbb3e:34049 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T23:46:26,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:26,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34049-0x1000d06dc2e0000, quorum=127.0.0.1:51082, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T23:46:26,629 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c3d82b3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T23:46:26,630 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@190023f0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T23:46:26,630 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T23:46:26,630 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c97cc8b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T23:46:26,631 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e74bc8b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/hadoop.log.dir/,STOPPED} 2024-12-09T23:46:26,634 WARN [BP-534286281-172.17.0.2-1733787981300 heartbeating to localhost/127.0.0.1:41235 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T23:46:26,634 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T23:46:26,634 WARN [BP-534286281-172.17.0.2-1733787981300 heartbeating to localhost/127.0.0.1:41235 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-534286281-172.17.0.2-1733787981300 (Datanode Uuid 5b161dbd-cecb-417a-81f5-16f8578d89b8) service to localhost/127.0.0.1:41235 2024-12-09T23:46:26,634 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T23:46:26,634 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/data/data5/current/BP-534286281-172.17.0.2-1733787981300 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T23:46:26,635 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/data/data6/current/BP-534286281-172.17.0.2-1733787981300 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T23:46:26,635 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T23:46:26,636 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25cc5f4d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T23:46:26,637 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6686fe53{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T23:46:26,637 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T23:46:26,637 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72785dee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T23:46:26,637 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c773fd1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/hadoop.log.dir/,STOPPED} 2024-12-09T23:46:26,638 WARN [BP-534286281-172.17.0.2-1733787981300 heartbeating to localhost/127.0.0.1:41235 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T23:46:26,638 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T23:46:26,638 WARN  [BP-534286281-172.17.0.2-1733787981300 heartbeating to localhost/127.0.0.1:41235 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-534286281-172.17.0.2-1733787981300 (Datanode Uuid 96b9268c-1c16-46f2-8668-71fb6ba87d29) service to localhost/127.0.0.1:41235
2024-12-09T23:46:26,638 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T23:46:26,639 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/data/data3/current/BP-534286281-172.17.0.2-1733787981300 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T23:46:26,639 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/data/data4/current/BP-534286281-172.17.0.2-1733787981300 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T23:46:26,639 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T23:46:26,641 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@11cbed31{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T23:46:26,641 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11812ea4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T23:46:26,641 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T23:46:26,641 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68e19264{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T23:46:26,642 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e23bf16{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/hadoop.log.dir/,STOPPED}
2024-12-09T23:46:26,643 WARN  [BP-534286281-172.17.0.2-1733787981300 heartbeating to localhost/127.0.0.1:41235 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T23:46:26,643 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T23:46:26,643 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T23:46:26,643 WARN  [BP-534286281-172.17.0.2-1733787981300 heartbeating to localhost/127.0.0.1:41235 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-534286281-172.17.0.2-1733787981300 (Datanode Uuid 9d69277e-4fd5-4466-a524-6a703d451e5e) service to localhost/127.0.0.1:41235
2024-12-09T23:46:26,643 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/data/data1/current/BP-534286281-172.17.0.2-1733787981300 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T23:46:26,644 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/cluster_adc1a7e4-34ab-ed27-bffd-4111344671b0/data/data2/current/BP-534286281-172.17.0.2-1733787981300 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T23:46:26,644 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T23:46:26,649 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10db3f46{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T23:46:26,650 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43a7f4cb{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T23:46:26,650 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T23:46:26,650 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4072566{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T23:46:26,650 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17794d45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7a4c5582-8dd7-2e07-0b95-3a62344f12d3/hadoop.log.dir/,STOPPED}
2024-12-09T23:46:26,657 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T23:46:26,681 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T23:46:26,687 INFO  [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=148 (was 86) - Thread LEAK? -, OpenFileDescriptor=516 (was 453) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=242 (was 220) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4952 (was 5127)