2024-12-10 11:46:16,339 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-10 11:46:16,350 main DEBUG Took 0.009576 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-10 11:46:16,351 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-10 11:46:16,351 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-10 11:46:16,352 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-10 11:46:16,353 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 11:46:16,361 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-10 11:46:16,376 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,377 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 11:46:16,378 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,378 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 11:46:16,378 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,379 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 11:46:16,379 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,380 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 11:46:16,380 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,380 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 11:46:16,381 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,381 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 11:46:16,382 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,382 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
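
The LoggerConfig builders above pin per-package levels for the test run (for example org.apache.zookeeper at ERROR, org.apache.hadoop.metrics2.util.MBeans at ERROR, org.apache.hadoop.hbase.ipc.FailedServers at DEBUG). A minimal sketch, assuming that configuration is the active one in the JVM, of confirming the effective levels through the public Log4j2 API; the class name LogLevelProbe is illustrative only:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class LogLevelProbe {
        public static void main(String[] args) {
            // With the configuration being built above, ZooKeeper logging is capped at ERROR
            // while the HBase packages log at DEBUG.
            Logger zk = LogManager.getLogger("org.apache.zookeeper");
            Logger hbase = LogManager.getLogger("org.apache.hadoop.hbase");
            System.out.println("zookeeper debug enabled: " + zk.isDebugEnabled());      // expected: false
            System.out.println("zookeeper error enabled: " + zk.isEnabled(Level.ERROR)); // expected: true
            System.out.println("hbase debug enabled:     " + hbase.isDebugEnabled());    // expected: true
        }
    }
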
2024-12-10 11:46:16,382 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,382 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 11:46:16,383 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,383 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 11:46:16,383 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,384 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 11:46:16,384 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,384 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 11:46:16,385 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,385 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 11:46:16,385 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,385 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-10 11:46:16,387 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 11:46:16,388 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-10 11:46:16,389 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-10 11:46:16,390 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
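
At this point the configuration holds one named LoggerConfig per package listed in createLoggers plus a root logger at INFO that routes to the Console appender. A rough programmatic equivalent using Log4j2's ConfigurationBuilder is sketched below; it is an approximation of the properties file the test actually loads, a stock Console appender stands in for the HBaseTestAppender built further down, and the pattern string is taken from the PatternLayout record that follows:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
    import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

    public class TestLogConfigSketch {
        public static void main(String[] args) {
            ConfigurationBuilder<BuiltConfiguration> b = ConfigurationBuilderFactory.newConfigurationBuilder();
            // Console appender to stderr; the harness really uses its own HBaseTestAppender.
            b.add(b.newAppender("Console", "Console")
                   .addAttribute("target", "SYSTEM_ERR")
                   .add(b.newLayout("PatternLayout")
                         .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")));
            // A few of the per-logger levels from the records above.
            b.add(b.newLogger("org.apache.zookeeper", Level.ERROR));
            b.add(b.newLogger("org.apache.hadoop", Level.WARN));
            b.add(b.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
            // Root logger: INFO, routed to the Console appender (levelAndRefs="INFO,Console" above).
            b.add(b.newRootLogger(Level.INFO).add(b.newAppenderRef("Console")));
            Configurator.initialize(b.build());
        }
    }
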
2024-12-10 11:46:16,391 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-10 11:46:16,391 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-10 11:46:16,398 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-10 11:46:16,400 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-10 11:46:16,402 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-10 11:46:16,402 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-10 11:46:16,403 main DEBUG createAppenders(={Console}) 2024-12-10 11:46:16,403 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-10 11:46:16,404 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-10 11:46:16,404 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-10 11:46:16,404 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-10 11:46:16,405 main DEBUG OutputStream closed 2024-12-10 11:46:16,405 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-10 11:46:16,405 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-10 11:46:16,405 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-10 11:46:16,479 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-10 11:46:16,482 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-10 11:46:16,483 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-10 11:46:16,485 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-10 11:46:16,485 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-10 11:46:16,486 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-10 11:46:16,486 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-10 11:46:16,487 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-10 11:46:16,487 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-10 11:46:16,488 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-10 11:46:16,488 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-10 11:46:16,488 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-10 11:46:16,489 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-10 11:46:16,489 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-10 11:46:16,490 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-10 11:46:16,490 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-10 11:46:16,490 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-10 11:46:16,491 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-10 11:46:16,494 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-10 11:46:16,494 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-10 11:46:16,494 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-10 11:46:16,495 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-10T11:46:16,514 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-10 11:46:16,517 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-10 11:46:16,518 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
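
The Registering MBean records show that, once the reconfiguration for context 1dbd16a6 completes, Log4j2 exposes the context, its StatusLogger, its loggers, and the Console appender over JMX. A small probe that lists those beans, only meaningful when run inside the same JVM after this registration; the class name is illustrative:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class Log4jMBeanProbe {
        public static void main(String[] args) throws Exception {
            // Query everything in the org.apache.logging.log4j2 domain registered above
            // (context, StatusLogger, ContextSelector, one entry per logger, Appenders/Console).
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            for (ObjectName name : server.queryNames(new ObjectName("org.apache.logging.log4j2:*"), null)) {
                System.out.println(name);
            }
        }
    }
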
2024-12-10T11:46:16,748 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d 2024-12-10T11:46:16,772 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31, deleteOnExit=true 2024-12-10T11:46:16,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/test.cache.data in system properties and HBase conf 2024-12-10T11:46:16,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/hadoop.tmp.dir in system properties and HBase conf 2024-12-10T11:46:16,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/hadoop.log.dir in system properties and HBase conf 2024-12-10T11:46:16,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-10T11:46:16,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-10T11:46:16,777 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-10T11:46:16,864 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-10T11:46:16,948 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-10T11:46:16,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-10T11:46:16,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-10T11:46:16,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-10T11:46:16,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T11:46:16,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-10T11:46:16,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-10T11:46:16,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T11:46:16,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T11:46:16,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-10T11:46:16,956 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/nfs.dump.dir in system properties and HBase conf 2024-12-10T11:46:16,956 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/java.io.tmpdir in system properties and HBase conf 2024-12-10T11:46:16,956 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T11:46:16,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-10T11:46:16,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-10T11:46:17,935 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-10T11:46:18,001 INFO [Time-limited test {}] log.Log(170): Logging initialized @2220ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-10T11:46:18,065 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:46:18,122 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T11:46:18,141 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T11:46:18,142 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T11:46:18,143 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T11:46:18,154 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:46:18,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/hadoop.log.dir/,AVAILABLE} 2024-12-10T11:46:18,158 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T11:46:18,315 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/java.io.tmpdir/jetty-localhost-43407-hadoop-hdfs-3_4_1-tests_jar-_-any-2108940810153922426/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-10T11:46:18,321 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:43407} 2024-12-10T11:46:18,322 INFO [Time-limited test {}] server.Server(415): Started @2542ms 2024-12-10T11:46:18,794 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:46:18,800 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T11:46:18,801 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T11:46:18,801 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T11:46:18,801 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T11:46:18,802 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/hadoop.log.dir/,AVAILABLE} 2024-12-10T11:46:18,803 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T11:46:18,897 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/java.io.tmpdir/jetty-localhost-34631-hadoop-hdfs-3_4_1-tests_jar-_-any-12933865626132139979/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:46:18,897 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:34631} 2024-12-10T11:46:18,898 INFO [Time-limited test {}] server.Server(415): Started @3118ms 2024-12-10T11:46:18,941 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T11:46:19,037 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:46:19,043 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T11:46:19,047 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T11:46:19,047 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T11:46:19,048 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T11:46:19,049 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/hadoop.log.dir/,AVAILABLE} 2024-12-10T11:46:19,050 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T11:46:19,159 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/java.io.tmpdir/jetty-localhost-46291-hadoop-hdfs-3_4_1-tests_jar-_-any-16634796347000632766/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:46:19,160 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:46291} 2024-12-10T11:46:19,160 INFO [Time-limited test {}] server.Server(415): Started @3380ms 2024-12-10T11:46:19,162 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T11:46:19,209 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:46:19,214 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T11:46:19,216 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T11:46:19,216 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T11:46:19,216 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T11:46:19,217 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/hadoop.log.dir/,AVAILABLE} 2024-12-10T11:46:19,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T11:46:19,310 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/java.io.tmpdir/jetty-localhost-34729-hadoop-hdfs-3_4_1-tests_jar-_-any-15532478802787478349/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:46:19,311 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:34729} 2024-12-10T11:46:19,311 INFO [Time-limited test {}] server.Server(415): Started @3531ms 2024-12-10T11:46:19,313 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
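
The Jetty records above correspond to the embedded NameNode web UI (webapps/hdfs on port 43407) and three DataNode web UIs (ports 34631, 46291, 34729) of the HDFS mini cluster that HBaseTestingUtil stands up for the test. For reference, a stand-alone sketch of a comparable 3-DataNode cluster using MiniDFSCluster from the hadoop-hdfs test jar; the base directory is hypothetical, and the real test uses the target/test-data path shown above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Hypothetical scratch directory for the mini cluster's name/data dirs.
            conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
            try {
                cluster.waitActive();
                FileSystem fs = cluster.getFileSystem();
                fs.mkdirs(new Path("/smoke-test"));
                System.out.println("NameNode up at " + fs.getUri());
            } finally {
                cluster.shutdown();
            }
        }
    }
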
2024-12-10T11:46:20,222 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/data/data3/current/BP-1050097718-172.17.0.2-1733831177438/current, will proceed with Du for space computation calculation, 2024-12-10T11:46:20,222 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/data/data4/current/BP-1050097718-172.17.0.2-1733831177438/current, will proceed with Du for space computation calculation, 2024-12-10T11:46:20,222 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/data/data1/current/BP-1050097718-172.17.0.2-1733831177438/current, will proceed with Du for space computation calculation, 2024-12-10T11:46:20,222 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/data/data2/current/BP-1050097718-172.17.0.2-1733831177438/current, will proceed with Du for space computation calculation, 2024-12-10T11:46:20,248 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-10T11:46:20,248 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-10T11:46:20,259 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/data/data6/current/BP-1050097718-172.17.0.2-1733831177438/current, will proceed with Du for space computation calculation, 2024-12-10T11:46:20,259 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/data/data5/current/BP-1050097718-172.17.0.2-1733831177438/current, will proceed with Du for space computation calculation, 2024-12-10T11:46:20,279 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-10T11:46:20,290 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe7fba2eb41b189d5 with lease ID 0x78545152cca636b0: Processing first storage report for DS-55f19d74-cb7e-4695-93f8-9554f6e22f52 from datanode DatanodeRegistration(127.0.0.1:43797, datanodeUuid=fa319895-7728-4d59-b387-962e0eca2518, infoPort=43525, infoSecurePort=0, ipcPort=36625, storageInfo=lv=-57;cid=testClusterID;nsid=583154481;c=1733831177438) 2024-12-10T11:46:20,291 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe7fba2eb41b189d5 with lease ID 0x78545152cca636b0: from storage DS-55f19d74-cb7e-4695-93f8-9554f6e22f52 node DatanodeRegistration(127.0.0.1:43797, datanodeUuid=fa319895-7728-4d59-b387-962e0eca2518, infoPort=43525, infoSecurePort=0, ipcPort=36625, storageInfo=lv=-57;cid=testClusterID;nsid=583154481;c=1733831177438), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-10T11:46:20,292 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf8319b88f5a94be9 with lease ID 0x78545152cca636b1: Processing first storage report for DS-d3c7f28e-8dd8-4a16-81c0-9f69b8e63380 from datanode DatanodeRegistration(127.0.0.1:33591, datanodeUuid=56ba2a47-76fb-4fd2-b03e-8537a18173f8, infoPort=34881, infoSecurePort=0, ipcPort=35661, storageInfo=lv=-57;cid=testClusterID;nsid=583154481;c=1733831177438) 2024-12-10T11:46:20,292 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf8319b88f5a94be9 with lease ID 0x78545152cca636b1: from storage DS-d3c7f28e-8dd8-4a16-81c0-9f69b8e63380 node DatanodeRegistration(127.0.0.1:33591, datanodeUuid=56ba2a47-76fb-4fd2-b03e-8537a18173f8, infoPort=34881, infoSecurePort=0, ipcPort=35661, storageInfo=lv=-57;cid=testClusterID;nsid=583154481;c=1733831177438), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:46:20,292 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeff15570605ee6e7 with lease ID 0x78545152cca636af: Processing first storage report for DS-2f7c98c9-d0e7-4c9f-87fa-2c4211e06011 from datanode DatanodeRegistration(127.0.0.1:38893, datanodeUuid=b09f0da5-dbd2-43d7-a8cf-d987533785f3, infoPort=35301, infoSecurePort=0, ipcPort=45337, storageInfo=lv=-57;cid=testClusterID;nsid=583154481;c=1733831177438) 2024-12-10T11:46:20,292 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeff15570605ee6e7 with lease ID 0x78545152cca636af: from storage DS-2f7c98c9-d0e7-4c9f-87fa-2c4211e06011 node DatanodeRegistration(127.0.0.1:38893, datanodeUuid=b09f0da5-dbd2-43d7-a8cf-d987533785f3, infoPort=35301, infoSecurePort=0, ipcPort=45337, storageInfo=lv=-57;cid=testClusterID;nsid=583154481;c=1733831177438), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:46:20,292 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe7fba2eb41b189d5 with lease ID 0x78545152cca636b0: Processing first storage report for DS-b66a202c-48bb-4de8-93d4-f2b36f83481c from datanode DatanodeRegistration(127.0.0.1:43797, datanodeUuid=fa319895-7728-4d59-b387-962e0eca2518, infoPort=43525, infoSecurePort=0, ipcPort=36625, storageInfo=lv=-57;cid=testClusterID;nsid=583154481;c=1733831177438) 2024-12-10T11:46:20,293 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xe7fba2eb41b189d5 with lease ID 0x78545152cca636b0: from storage DS-b66a202c-48bb-4de8-93d4-f2b36f83481c node DatanodeRegistration(127.0.0.1:43797, datanodeUuid=fa319895-7728-4d59-b387-962e0eca2518, infoPort=43525, infoSecurePort=0, ipcPort=36625, storageInfo=lv=-57;cid=testClusterID;nsid=583154481;c=1733831177438), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:46:20,293 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf8319b88f5a94be9 with lease ID 0x78545152cca636b1: Processing first storage report for DS-6b35ae55-0db9-479a-b59a-b5e9b2980515 from datanode DatanodeRegistration(127.0.0.1:33591, datanodeUuid=56ba2a47-76fb-4fd2-b03e-8537a18173f8, infoPort=34881, infoSecurePort=0, ipcPort=35661, storageInfo=lv=-57;cid=testClusterID;nsid=583154481;c=1733831177438) 2024-12-10T11:46:20,293 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf8319b88f5a94be9 with lease ID 0x78545152cca636b1: from storage DS-6b35ae55-0db9-479a-b59a-b5e9b2980515 node DatanodeRegistration(127.0.0.1:33591, datanodeUuid=56ba2a47-76fb-4fd2-b03e-8537a18173f8, infoPort=34881, infoSecurePort=0, ipcPort=35661, storageInfo=lv=-57;cid=testClusterID;nsid=583154481;c=1733831177438), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:46:20,293 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeff15570605ee6e7 with lease ID 0x78545152cca636af: Processing first storage report for DS-a6e4e7db-6b29-4c71-aa3f-7a2574d4c51d from datanode DatanodeRegistration(127.0.0.1:38893, datanodeUuid=b09f0da5-dbd2-43d7-a8cf-d987533785f3, infoPort=35301, infoSecurePort=0, ipcPort=45337, storageInfo=lv=-57;cid=testClusterID;nsid=583154481;c=1733831177438) 2024-12-10T11:46:20,293 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeff15570605ee6e7 with lease ID 0x78545152cca636af: from storage DS-a6e4e7db-6b29-4c71-aa3f-7a2574d4c51d node DatanodeRegistration(127.0.0.1:38893, datanodeUuid=b09f0da5-dbd2-43d7-a8cf-d987533785f3, infoPort=35301, infoSecurePort=0, ipcPort=45337, storageInfo=lv=-57;cid=testClusterID;nsid=583154481;c=1733831177438), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:46:20,313 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d 2024-12-10T11:46:20,381 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable
2024-12-10T11:46:20,428 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=157, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=172, ProcessCount=11, AvailableMemoryMB=7355
2024-12-10T11:46:20,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-10T11:46:20,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-10T11:46:20,525 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/zookeeper_0, clientPort=59713, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-10T11:46:20,536 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59713
2024-12-10T11:46:20,544 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T11:46:20,547 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T11:46:20,630 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-10T11:46:20,630 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-10T11:46:20,674 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-884346255_22 at /127.0.0.1:53674 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:43797:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53674 dst: /127.0.0.1:43797
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T11:46:20,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-10T11:46:21,092 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-10T11:46:21,106 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d with version=8
2024-12-10T11:46:21,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/hbase-staging
2024-12-10T11:46:21,186 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-10T11:46:21,432 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef751fafe6b1:0 server-side Connection retries=45
2024-12-10T11:46:21,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T11:46:21,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-10T11:46:21,444 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-10T11:46:21,445 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T11:46:21,445 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-10T11:46:21,548 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-10T11:46:21,594 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-10T11:46:21,601 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-10T11:46:21,604 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T11:46:21,625 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 19701 (auto-detected) 2024-12-10T11:46:21,626 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-10T11:46:21,642 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40549 2024-12-10T11:46:21,659 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40549 connecting to ZooKeeper ensemble=127.0.0.1:59713 2024-12-10T11:46:21,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:405490x0, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T11:46:21,788 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40549-0x1000f99ff790000 connected 2024-12-10T11:46:21,881 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:21,883 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:21,894 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:21,898 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d, hbase.cluster.distributed=false 2024-12-10T11:46:21,917 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:46:21,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40549 2024-12-10T11:46:21,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40549 2024-12-10T11:46:21,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40549 2024-12-10T11:46:21,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40549 2024-12-10T11:46:21,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40549 2024-12-10T11:46:22,008 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef751fafe6b1:0 server-side Connection retries=45 2024-12-10T11:46:22,010 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:22,010 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:22,010 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T11:46:22,010 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:22,010 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T11:46:22,012 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T11:46:22,014 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T11:46:22,015 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36793 2024-12-10T11:46:22,016 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36793 connecting to ZooKeeper ensemble=127.0.0.1:59713 2024-12-10T11:46:22,017 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:22,020 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:22,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:367930x0, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T11:46:22,052 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36793-0x1000f99ff790001 connected 2024-12-10T11:46:22,052 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:22,057 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T11:46:22,067 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T11:46:22,070 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T11:46:22,076 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:46:22,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36793 2024-12-10T11:46:22,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=36793 2024-12-10T11:46:22,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36793 2024-12-10T11:46:22,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36793 2024-12-10T11:46:22,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36793 2024-12-10T11:46:22,094 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef751fafe6b1:0 server-side Connection retries=45 2024-12-10T11:46:22,094 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:22,094 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:22,095 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T11:46:22,095 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:22,095 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T11:46:22,095 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T11:46:22,096 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T11:46:22,097 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43719 2024-12-10T11:46:22,098 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43719 connecting to ZooKeeper ensemble=127.0.0.1:59713 2024-12-10T11:46:22,100 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:22,104 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:22,138 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:437190x0, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T11:46:22,139 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43719-0x1000f99ff790002 connected 2024-12-10T11:46:22,139 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:22,140 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-12-10T11:46:22,141 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T11:46:22,143 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T11:46:22,145 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:46:22,146 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43719 2024-12-10T11:46:22,146 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43719 2024-12-10T11:46:22,147 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43719 2024-12-10T11:46:22,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43719 2024-12-10T11:46:22,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43719 2024-12-10T11:46:22,166 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef751fafe6b1:0 server-side Connection retries=45 2024-12-10T11:46:22,166 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:22,166 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:22,166 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T11:46:22,166 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:22,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T11:46:22,167 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T11:46:22,167 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T11:46:22,168 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37723 2024-12-10T11:46:22,169 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37723 connecting to ZooKeeper ensemble=127.0.0.1:59713 2024-12-10T11:46:22,170 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:22,173 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:22,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:377230x0, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T11:46:22,202 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:377230x0, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:22,202 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37723-0x1000f99ff790003 connected 2024-12-10T11:46:22,203 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T11:46:22,203 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T11:46:22,205 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T11:46:22,207 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:46:22,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37723 2024-12-10T11:46:22,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37723 2024-12-10T11:46:22,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37723 2024-12-10T11:46:22,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37723 2024-12-10T11:46:22,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37723 2024-12-10T11:46:22,227 DEBUG [M:0;ef751fafe6b1:40549 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef751fafe6b1:40549 2024-12-10T11:46:22,228 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef751fafe6b1,40549,1733831181287 2024-12-10T11:46:22,264 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:22,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:22,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:22,264 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:22,268 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef751fafe6b1,40549,1733831181287 2024-12-10T11:46:22,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:22,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T11:46:22,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T11:46:22,296 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T11:46:22,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:22,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:22,296 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:22,297 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-10T11:46:22,298 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef751fafe6b1,40549,1733831181287 from backup master directory 2024-12-10T11:46:22,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef751fafe6b1,40549,1733831181287 2024-12-10T11:46:22,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:22,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
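[Editor's note] The repeated ZKUtil lines above ("Set watcher on znode that does not yet exist, /hbase/running", then /hbase/master and /hbase/acl) rest on the plain ZooKeeper exists() call, which registers a watch whether or not the node is present yet. Below is a minimal sketch of that pattern with the stock ZooKeeper client only; the 127.0.0.1:2181 connect string and the 30 s session timeout are placeholders (the quorum in this log is 127.0.0.1:59713), and this is not the HBase ZKUtil/ZKWatcher implementation itself.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder connect string; the mini cluster above uses 127.0.0.1:59713.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

        Watcher watcher = (WatchedEvent event) ->
            System.out.println("got " + event.getType() + " on " + event.getPath());

        // exists() registers the watch even when the znode is absent, which is
        // exactly how a watcher can be set "on znode that does not yet exist".
        Stat stat = zk.exists("/hbase/running", watcher);
        System.out.println("/hbase/running is "
            + (stat == null ? "absent; waiting for NodeCreated" : "present"));

        zk.close();
      }
    }

Once the master later creates /hbase/running, every client that registered such a watch receives the NodeCreated event, which is what shows up in this log at 11:46:23,055.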
2024-12-10T11:46:22,310 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:22,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:22,311 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T11:46:22,311 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef751fafe6b1,40549,1733831181287 2024-12-10T11:46:22,313 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-10T11:46:22,315 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-10T11:46:22,369 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/hbase.id] with ID: ebf1fa2c-a6ab-4b5f-8dca-c14901266ac9 2024-12-10T11:46:22,370 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/.tmp/hbase.id 2024-12-10T11:46:22,376 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:22,376 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:22,379 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-884346255_22 at /127.0.0.1:49546 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:38893:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49546 dst: /127.0.0.1:38893 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:22,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-10T11:46:22,387 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:46:22,387 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/.tmp/hbase.id]:[hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/hbase.id] 2024-12-10T11:46:22,431 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:22,435 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-10T11:46:22,453 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-10T11:46:22,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:22,485 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:22,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:22,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:22,498 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:22,498 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:22,501 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-884346255_22 at /127.0.0.1:49568 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:38893:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49568 dst: /127.0.0.1:38893 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:22,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-10T11:46:22,506 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
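[Editor's note] The DFSStripedOutputStream warnings and the DataXceiver "Premature EOF" errors above trace back to the RS-3-2-1024k erasure coding policy: Reed-Solomon with 3 data plus 2 parity blocks wants at least five datanodes to place a full block group, while the write pipelines later in this log show only three datanodes (ports 38893, 33591, 43797). Each block group therefore comes up two blocks short, hence the "high risk of losing data" warnings. Besides the `hdfs ec -verifyClusterSetup` command the warning itself suggests, the policy on a directory can be inspected or dropped through the HDFS client API. The sketch below is illustrative only; the namenode URI hdfs://localhost:8020 is a placeholder (the test filesystem here is hdfs://localhost:45975), and the directory path is reused from the log purely for flavor.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder namenode URI; the log above points at hdfs://localhost:45975.
        DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(URI.create("hdfs://localhost:8020"), conf);

        Path dir = new Path("/user/jenkins/test-data");
        ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
        System.out.println(dir + " -> "
            + (policy == null ? "plain replication" : policy.getName()));

        // On a cluster with fewer than five datanodes, falling back to
        // replication avoids under-replicated RS-3-2 block groups.
        if (policy != null && policy.getName().startsWith("RS-3-2")) {
          dfs.unsetErasureCodingPolicy(dir);
        }
        dfs.close();
      }
    }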
2024-12-10T11:46:22,519 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T11:46:22,520 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-10T11:46:22,525 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T11:46:22,548 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:22,548 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:22,550 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-884346255_22 at /127.0.0.1:49594 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:38893:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49594 dst: /127.0.0.1:38893 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:22,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-10T11:46:22,556 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:46:22,570 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store 2024-12-10T11:46:22,584 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:22,584 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:22,587 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-884346255_22 at /127.0.0.1:46958 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46958 dst: /127.0.0.1:33591 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:22,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-10T11:46:22,592 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:46:22,596 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-10T11:46:22,598 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:46:22,599 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T11:46:22,600 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:46:22,600 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:46:22,601 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-10T11:46:22,601 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:46:22,602 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:46:22,603 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733831182599Disabling compacts and flushes for region at 1733831182599Disabling writes for close at 1733831182601 (+2 ms)Writing region close event to WAL at 1733831182601Closed at 1733831182601 2024-12-10T11:46:22,604 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/.initializing 2024-12-10T11:46:22,604 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/WALs/ef751fafe6b1,40549,1733831181287 2024-12-10T11:46:22,611 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T11:46:22,624 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef751fafe6b1%2C40549%2C1733831181287, suffix=, logDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/WALs/ef751fafe6b1,40549,1733831181287, archiveDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/oldWALs, maxLogs=10 2024-12-10T11:46:22,647 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/WALs/ef751fafe6b1,40549,1733831181287/ef751fafe6b1%2C40549%2C1733831181287.1733831182627, exclude list is [], retry=0 2024-12-10T11:46:22,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:22,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33591,DS-d3c7f28e-8dd8-4a16-81c0-9f69b8e63380,DISK] 2024-12-10T11:46:22,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43797,DS-55f19d74-cb7e-4695-93f8-9554f6e22f52,DISK] 2024-12-10T11:46:22,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38893,DS-2f7c98c9-d0e7-4c9f-87fa-2c4211e06011,DISK] 2024-12-10T11:46:22,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-10T11:46:22,704 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/WALs/ef751fafe6b1,40549,1733831181287/ef751fafe6b1%2C40549%2C1733831181287.1733831182627 2024-12-10T11:46:22,705 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35301:35301),(127.0.0.1/127.0.0.1:43525:43525),(127.0.0.1/127.0.0.1:34881:34881)] 2024-12-10T11:46:22,706 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-10T11:46:22,706 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:46:22,708 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:22,709 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:22,742 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:22,762 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-10T11:46:22,765 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:22,767 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:22,768 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:22,771 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-10T11:46:22,771 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:22,772 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:46:22,772 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:22,775 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-10T11:46:22,775 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:22,776 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:46:22,776 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:22,778 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-10T11:46:22,779 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:22,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:46:22,780 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:22,783 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:22,784 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:22,789 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:22,790 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:22,793 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T11:46:22,798 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:22,804 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T11:46:22,806 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61150869, jitterRate=-0.08878104388713837}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T11:46:22,814 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733831182720Initializing all the Stores at 1733831182722 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831182722Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733831182723 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733831182723Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733831182723Cleaning up temporary data from old regions at 1733831182790 (+67 ms)Region opened successfully at 1733831182813 (+23 ms) 2024-12-10T11:46:22,815 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-10T11:46:22,845 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@95957c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef751fafe6b1/172.17.0.2:0 2024-12-10T11:46:22,872 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-10T11:46:22,881 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-10T11:46:22,881 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-10T11:46:22,883 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-10T11:46:22,884 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-10T11:46:22,889 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-10T11:46:22,889 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-10T11:46:22,909 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-10T11:46:22,917 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-10T11:46:22,963 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-10T11:46:22,966 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-10T11:46:22,968 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-10T11:46:22,979 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-10T11:46:22,981 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-10T11:46:22,985 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-10T11:46:22,996 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-10T11:46:22,998 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-10T11:46:23,009 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-10T11:46:23,031 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-10T11:46:23,042 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-10T11:46:23,055 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:23,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:23,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:23,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:23,055 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,059 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef751fafe6b1,40549,1733831181287, sessionid=0x1000f99ff790000, setting cluster-up flag (Was=false) 2024-12-10T11:46:23,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
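[Editor's note] The cluster of DEBUG lines above — "Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)", and the same probe for /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge and /hbase/snapshot-cleanup — is the master reading optional feature-switch znodes and falling back to defaults when they were never created. The underlying ZooKeeper pattern is getData() with NoNodeException treated as "use the default"; a minimal sketch follows. The connect string, the boolean-as-string payload and the default value are assumptions for illustration only — HBase actually keeps protobuf-serialized state in these znodes.

    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    public class OptionalZNodeSketch {
      /** Read an optional switch znode; a missing node simply means "default". */
      static boolean readSwitch(ZooKeeper zk, String path, boolean defaultValue)
          throws Exception {
        try {
          byte[] data = zk.getData(path, false, null);
          return Boolean.parseBoolean(new String(data, StandardCharsets.UTF_8));
        } catch (KeeperException.NoNodeException e) {
          return defaultValue; // znode never created: not an error, keep the default
        }
      }

      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
        System.out.println("split switch = " + readSwitch(zk, "/hbase/switch/split", true));
        zk.close();
      }
    }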
2024-12-10T11:46:23,088 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,118 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-10T11:46:23,123 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef751fafe6b1,40549,1733831181287 2024-12-10T11:46:23,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,146 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,176 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-10T11:46:23,181 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef751fafe6b1,40549,1733831181287 2024-12-10T11:46:23,191 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-10T11:46:23,215 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(746): ClusterId : ebf1fa2c-a6ab-4b5f-8dca-c14901266ac9 2024-12-10T11:46:23,215 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(746): ClusterId : ebf1fa2c-a6ab-4b5f-8dca-c14901266ac9 2024-12-10T11:46:23,216 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(746): ClusterId : ebf1fa2c-a6ab-4b5f-8dca-c14901266ac9 2024-12-10T11:46:23,217 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T11:46:23,217 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T11:46:23,217 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T11:46:23,239 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T11:46:23,239 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
flush-table-proc initialized 2024-12-10T11:46:23,239 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T11:46:23,240 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T11:46:23,240 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T11:46:23,240 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T11:46:23,253 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T11:46:23,253 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T11:46:23,253 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T11:46:23,253 DEBUG [RS:0;ef751fafe6b1:36793 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75ef766d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef751fafe6b1/172.17.0.2:0 2024-12-10T11:46:23,253 DEBUG [RS:2;ef751fafe6b1:37723 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42478ba4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef751fafe6b1/172.17.0.2:0 2024-12-10T11:46:23,253 DEBUG [RS:1;ef751fafe6b1:43719 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6525c1fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef751fafe6b1/172.17.0.2:0 2024-12-10T11:46:23,262 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-10T11:46:23,268 DEBUG [RS:1;ef751fafe6b1:43719 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;ef751fafe6b1:43719 2024-12-10T11:46:23,271 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T11:46:23,271 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T11:46:23,271 DEBUG [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(832): About to register with Master. 
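The ShutdownHook(81) records above note that each region server installs a JVM shutdown hook before registering with the master. A minimal sketch of that general mechanism with the standard java.lang.Runtime API (the hook body is purely illustrative, not HBase's actual shutdown logic):

    // Illustrative: register a named JVM shutdown hook, as in the
    // "Installed shutdown hook thread: Shutdownhook:RS:..." lines above.
    public class ShutdownHookExample {
      public static void main(String[] args) {
        Thread hook = new Thread(() -> {
          // Placeholder cleanup; the real region server closes WALs, regions, and connections here.
          System.out.println("Shutdown hook running: releasing resources");
        }, "Shutdownhook:RS:example");
        Runtime.getRuntime().addShutdownHook(hook);
        System.out.println("Installed shutdown hook thread: " + hook.getName());
      }
    }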
2024-12-10T11:46:23,272 DEBUG [RS:2;ef751fafe6b1:37723 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;ef751fafe6b1:37723 2024-12-10T11:46:23,272 DEBUG [RS:0;ef751fafe6b1:36793 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef751fafe6b1:36793 2024-12-10T11:46:23,273 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T11:46:23,273 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T11:46:23,273 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T11:46:23,273 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T11:46:23,273 DEBUG [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T11:46:23,273 DEBUG [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T11:46:23,273 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-10T11:46:23,274 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef751fafe6b1,40549,1733831181287 with port=43719, startcode=1733831182093 2024-12-10T11:46:23,274 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef751fafe6b1,40549,1733831181287 with port=36793, startcode=1733831181979 2024-12-10T11:46:23,274 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef751fafe6b1,40549,1733831181287 with port=37723, startcode=1733831182165 2024-12-10T11:46:23,280 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
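The StochasticLoadBalancer record above lists the configured cost functions and reports the sum of their multipliers. As a rough illustration of how a weighted cost aggregate of that shape is computed (generic code, not the StochasticLoadBalancer implementation; the function names are copied from the log but the multipliers and cost values are made up):

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Generic weighted-cost aggregation, illustrating the idea behind
    // "CostFunctions=[...] , sum of multiplier of cost functions = ..." above.
    public class WeightedCostExample {
      public static void main(String[] args) {
        Map<String, double[]> costs = new LinkedHashMap<>(); // name -> {multiplier, normalizedCost}
        costs.put("RegionCountSkewCostFunction", new double[] {500, 0.10});
        costs.put("MoveCostFunction",            new double[] {7,   0.02});
        costs.put("ServerLocalityCostFunction",  new double[] {25,  0.40});

        double weightedSum = 0, multiplierSum = 0;
        for (double[] c : costs.values()) {
          weightedSum += c[0] * c[1];
          multiplierSum += c[0];
        }
        // A multiplier sum of 0.0, as reported in the log, would simply mean all multipliers are zero.
        System.out.printf("sum of multipliers = %.1f, weighted-average cost = %.4f%n",
            multiplierSum, weightedSum / multiplierSum);
      }
    }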
2024-12-10T11:46:23,287 DEBUG [RS:0;ef751fafe6b1:36793 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T11:46:23,287 DEBUG [RS:2;ef751fafe6b1:37723 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T11:46:23,289 DEBUG [RS:1;ef751fafe6b1:43719 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T11:46:23,287 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef751fafe6b1,40549,1733831181287 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-10T11:46:23,295 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef751fafe6b1:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:46:23,295 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef751fafe6b1:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:46:23,295 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef751fafe6b1:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:46:23,295 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef751fafe6b1:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:46:23,295 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef751fafe6b1:0, corePoolSize=10, maxPoolSize=10 2024-12-10T11:46:23,295 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,295 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef751fafe6b1:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:46:23,296 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-10T11:46:23,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-10T11:46:23,311 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733831213311 2024-12-10T11:46:23,313 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-10T11:46:23,315 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-10T11:46:23,319 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-10T11:46:23,319 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-10T11:46:23,319 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-10T11:46:23,319 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T11:46:23,320 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-10T11:46:23,320 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-10T11:46:23,320 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,324 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51953, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T11:46:23,324 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35419, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T11:46:23,324 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53833, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T11:46:23,326 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-10T11:46:23,327 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:23,327 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-10T11:46:23,327 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T11:46:23,327 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-10T11:46:23,332 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40549 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-10T11:46:23,336 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40549 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-10T11:46:23,336 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40549 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-10T11:46:23,337 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-10T11:46:23,338 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-10T11:46:23,343 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef751fafe6b1:0:becomeActiveMaster-HFileCleaner.large.0-1733831183339,5,FailOnTimeoutGroup] 2024-12-10T11:46:23,345 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:23,345 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:23,347 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef751fafe6b1:0:becomeActiveMaster-HFileCleaner.small.0-1733831183343,5,FailOnTimeoutGroup] 2024-12-10T11:46:23,347 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,347 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-10T11:46:23,348 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,349 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,352 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-884346255_22 at /127.0.0.1:47006 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:33591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47006 dst: /127.0.0.1:33591 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:23,357 DEBUG [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-10T11:46:23,357 DEBUG [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-10T11:46:23,357 DEBUG [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-10T11:46:23,357 WARN [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-10T11:46:23,357 WARN [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-10T11:46:23,357 WARN [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-10T11:46:23,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-10T11:46:23,359 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
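The records above show each region server's reportForDuty being rejected with ServerNotRunningYetException and retried after a short sleep ("sleeping 100 ms and then retrying"). A minimal sketch of that retry-until-ready pattern in generic Java (the 100 ms interval and the log wording mirror the records; the rest is illustrative):

    import java.util.concurrent.TimeUnit;
    import java.util.function.BooleanSupplier;

    // Generic retry loop mirroring "reportForDuty failed; sleeping 100 ms and then retrying."
    public class ReportForDutyRetry {
      static void retryUntilAccepted(BooleanSupplier reportForDuty) throws InterruptedException {
        while (!reportForDuty.getAsBoolean()) {
          System.out.println("reportForDuty failed; sleeping 100 ms and then retrying.");
          TimeUnit.MILLISECONDS.sleep(100);
        }
        System.out.println("reportForDuty accepted by master");
      }

      public static void main(String[] args) throws InterruptedException {
        final int[] attempts = {0};
        // Simulated master that only starts accepting registrations on the third attempt.
        retryUntilAccepted(() -> ++attempts[0] >= 3);
      }
    }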
2024-12-10T11:46:23,360 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-10T11:46:23,360 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d 2024-12-10T11:46:23,369 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:23,369 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:23,372 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-884346255_22 at /127.0.0.1:49636 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:38893:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49636 dst: /127.0.0.1:38893 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:23,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-10T11:46:23,377 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:46:23,378 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:46:23,381 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T11:46:23,383 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T11:46:23,384 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:23,385 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:23,385 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T11:46:23,388 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T11:46:23,388 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:23,389 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:23,390 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T11:46:23,393 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T11:46:23,393 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:23,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:23,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T11:46:23,398 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T11:46:23,398 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:23,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:23,400 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T11:46:23,401 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740 2024-12-10T11:46:23,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740 2024-12-10T11:46:23,405 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T11:46:23,405 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T11:46:23,406 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T11:46:23,409 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T11:46:23,415 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T11:46:23,416 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72338392, jitterRate=0.07792603969573975}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T11:46:23,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733831183378Initializing all the Stores at 1733831183380 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831183380Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831183380Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733831183380Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831183380Cleaning up temporary data from old regions at 1733831183405 (+25 ms)Region opened successfully at 1733831183418 (+13 ms) 2024-12-10T11:46:23,419 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T11:46:23,419 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T11:46:23,419 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T11:46:23,419 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T11:46:23,419 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T11:46:23,421 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T11:46:23,421 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733831183419Disabling compacts and flushes for region at 1733831183419Disabling writes for close at 1733831183419Writing region close event to WAL at 1733831183420 (+1 ms)Closed at 1733831183421 (+1 ms) 2024-12-10T11:46:23,424 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T11:46:23,424 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-10T11:46:23,430 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-10T11:46:23,438 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T11:46:23,441 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-10T11:46:23,458 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef751fafe6b1,40549,1733831181287 with port=43719, startcode=1733831182093 2024-12-10T11:46:23,458 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef751fafe6b1,40549,1733831181287 with port=37723, startcode=1733831182165 2024-12-10T11:46:23,458 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef751fafe6b1,40549,1733831181287 with port=36793, startcode=1733831181979 2024-12-10T11:46:23,460 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40549 {}] master.ServerManager(363): 
Checking decommissioned status of RegionServer ef751fafe6b1,37723,1733831182165 2024-12-10T11:46:23,462 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40549 {}] master.ServerManager(517): Registering regionserver=ef751fafe6b1,37723,1733831182165 2024-12-10T11:46:23,470 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40549 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef751fafe6b1,43719,1733831182093 2024-12-10T11:46:23,470 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40549 {}] master.ServerManager(517): Registering regionserver=ef751fafe6b1,43719,1733831182093 2024-12-10T11:46:23,470 DEBUG [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d 2024-12-10T11:46:23,470 DEBUG [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45975 2024-12-10T11:46:23,470 DEBUG [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T11:46:23,473 DEBUG [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d 2024-12-10T11:46:23,473 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40549 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef751fafe6b1,36793,1733831181979 2024-12-10T11:46:23,473 DEBUG [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45975 2024-12-10T11:46:23,473 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40549 {}] master.ServerManager(517): Registering regionserver=ef751fafe6b1,36793,1733831181979 2024-12-10T11:46:23,473 DEBUG [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T11:46:23,476 DEBUG [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d 2024-12-10T11:46:23,476 DEBUG [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45975 2024-12-10T11:46:23,476 DEBUG [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T11:46:23,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T11:46:23,526 DEBUG [RS:2;ef751fafe6b1:37723 {}] zookeeper.ZKUtil(111): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef751fafe6b1,37723,1733831182165 2024-12-10T11:46:23,527 WARN [RS:2;ef751fafe6b1:37723 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
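Around the registration records above, each region server publishes itself under /hbase/rs/<server> and the master's RegionServerTracker picks the node up (the "ephemeral node created" lines that follow). A minimal sketch of ephemeral-node registration with the plain ZooKeeper client (not HBase's ZKUtil; the path layout follows the log, the rest is illustrative):

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    // Illustrative ephemeral registration: the znode disappears automatically when the
    // region server's ZooKeeper session ends, which is what lets the master notice crashes.
    public class EphemeralRegistration {
      static String register(ZooKeeper zk, String serverName)
          throws KeeperException, InterruptedException {
        String path = "/hbase/rs/" + serverName;
        return zk.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      }
    }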
2024-12-10T11:46:23,527 DEBUG [RS:1;ef751fafe6b1:43719 {}] zookeeper.ZKUtil(111): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef751fafe6b1,43719,1733831182093 2024-12-10T11:46:23,527 DEBUG [RS:0;ef751fafe6b1:36793 {}] zookeeper.ZKUtil(111): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef751fafe6b1,36793,1733831181979 2024-12-10T11:46:23,527 INFO [RS:2;ef751fafe6b1:37723 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T11:46:23,527 WARN [RS:1;ef751fafe6b1:43719 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T11:46:23,527 WARN [RS:0;ef751fafe6b1:36793 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T11:46:23,528 DEBUG [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,37723,1733831182165 2024-12-10T11:46:23,528 INFO [RS:1;ef751fafe6b1:43719 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T11:46:23,528 INFO [RS:0;ef751fafe6b1:36793 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T11:46:23,528 DEBUG [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,43719,1733831182093 2024-12-10T11:46:23,528 DEBUG [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,36793,1733831181979 2024-12-10T11:46:23,530 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef751fafe6b1,37723,1733831182165] 2024-12-10T11:46:23,530 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef751fafe6b1,36793,1733831181979] 2024-12-10T11:46:23,530 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef751fafe6b1,43719,1733831182093] 2024-12-10T11:46:23,554 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T11:46:23,554 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T11:46:23,554 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T11:46:23,568 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T11:46:23,568 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T11:46:23,568 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T11:46:23,573 INFO 
[RS:2;ef751fafe6b1:37723 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T11:46:23,573 INFO [RS:0;ef751fafe6b1:36793 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T11:46:23,573 INFO [RS:1;ef751fafe6b1:43719 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T11:46:23,573 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,573 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,573 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,578 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T11:46:23,578 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T11:46:23,578 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T11:46:23,583 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T11:46:23,583 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T11:46:23,583 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T11:46:23,585 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,585 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,585 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
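The MemStoreFlusher records above report globalMemStoreLimit=880 M with a low-water mark of 836 M. Assuming default-style settings (a fixed fraction of the heap for the global memstore limit and a lower-limit factor of 0.95 of that limit; both are assumptions about this test's configuration, not read from the log), the numbers line up as 880 MB x 0.95 = 836 MB. A small sketch of that arithmetic:

    // Assumed values for illustration: 0.4 of heap for the global memstore limit,
    // 0.95 of that limit for the low-water mark at which flush pressure eases.
    public class MemStoreLimits {
      public static void main(String[] args) {
        double heapMb = 2200;                       // hypothetical heap chosen so 0.4 * heap = 880 MB
        double globalLimitMb = heapMb * 0.4;        // -> 880 MB, matching the log
        double lowerMarkMb = globalLimitMb * 0.95;  // -> 836 MB, matching the log
        System.out.printf("globalMemStoreLimit=%.0f M, globalMemStoreLimitLowMark=%.0f M%n",
            globalLimitMb, lowerMarkMb);
      }
    }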
2024-12-10T11:46:23,585 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,585 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,585 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,585 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,585 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,585 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef751fafe6b1:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:46:23,586 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef751fafe6b1:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:46:23,586 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 
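The ExecutorService(95) records above start a set of named per-server pools with fixed corePoolSize/maxPoolSize values. A minimal sketch of one such bounded, named pool with standard java.util.concurrent (generic code, not HBase's executor wrapper; the pool name and sizes are taken from the RS_OPEN_REGION line, the rest is illustrative):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    // Generic bounded pool mirroring "name=RS_OPEN_REGION-..., corePoolSize=1, maxPoolSize=1".
    public class NamedPoolExample {
      public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger();
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1,                             // corePoolSize=1, maxPoolSize=1
            60, TimeUnit.SECONDS,             // keep-alive for idle non-core threads
            new LinkedBlockingQueue<>(),
            r -> new Thread(r, "RS_OPEN_REGION-" + counter.incrementAndGet()));
        pool.submit(() -> System.out.println("open-region task running on "
            + Thread.currentThread().getName()));
        pool.shutdown();
      }
    }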
2024-12-10T11:46:23,586 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef751fafe6b1:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:46:23,586 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,586 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,587 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,587 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,587 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,587 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef751fafe6b1:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:46:23,587 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,587 DEBUG [RS:1;ef751fafe6b1:43719 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:46:23,587 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): 
Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,587 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef751fafe6b1:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:46:23,587 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,587 DEBUG [RS:0;ef751fafe6b1:36793 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:46:23,587 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:23,587 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef751fafe6b1:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:46:23,587 DEBUG [RS:2;ef751fafe6b1:37723 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:46:23,588 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,588 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,589 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,589 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,589 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,589 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,43719,1733831182093-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:46:23,589 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,589 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,589 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,589 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,589 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,589 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-10T11:46:23,589 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,590 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,590 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,590 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,36793,1733831181979-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:46:23,590 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,590 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,37723,1733831182165-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:46:23,592 WARN [ef751fafe6b1:40549 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-10T11:46:23,611 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T11:46:23,611 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T11:46:23,611 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T11:46:23,613 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,43719,1733831182093-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,613 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,36793,1733831181979-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,613 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,37723,1733831182165-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,613 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,613 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,613 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,613 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.Replication(171): ef751fafe6b1,36793,1733831181979 started 2024-12-10T11:46:23,613 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.Replication(171): ef751fafe6b1,37723,1733831182165 started 2024-12-10T11:46:23,613 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.Replication(171): ef751fafe6b1,43719,1733831182093 started 2024-12-10T11:46:23,638 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,638 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
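The chore entries above record each region server enabling fixed-period background tasks (CompactionChecker and MemstoreFlusherChore every 1000 ms, ExecutorStatusChore every 60 s, nonceCleaner every 360 s, and so on). HBase drives these through its own ChoreService/ScheduledChore classes; the following is only a rough, hedged sketch of the same fixed-rate pattern using plain JDK scheduling, with task names and periods copied from the log and placeholder task bodies that are not HBase code.

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
    public static void main(String[] args) {
        // Plain-JDK stand-in for a chore scheduler: one thread, fixed-rate tasks.
        ScheduledThreadPoolExecutor chorePool = new ScheduledThreadPoolExecutor(1);

        // Periods taken from the log entries above; the Runnables are illustrative placeholders.
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("CompactionChecker tick"), 0, 1000, TimeUnit.MILLISECONDS);
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("MemstoreFlusherChore tick"), 0, 1000, TimeUnit.MILLISECONDS);
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("ExecutorStatusChore tick"), 0, 60000, TimeUnit.MILLISECONDS);
    }
}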
2024-12-10T11:46:23,638 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:23,638 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(1482): Serving as ef751fafe6b1,43719,1733831182093, RpcServer on ef751fafe6b1/172.17.0.2:43719, sessionid=0x1000f99ff790002 2024-12-10T11:46:23,638 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(1482): Serving as ef751fafe6b1,37723,1733831182165, RpcServer on ef751fafe6b1/172.17.0.2:37723, sessionid=0x1000f99ff790003 2024-12-10T11:46:23,638 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(1482): Serving as ef751fafe6b1,36793,1733831181979, RpcServer on ef751fafe6b1/172.17.0.2:36793, sessionid=0x1000f99ff790001 2024-12-10T11:46:23,639 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T11:46:23,639 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T11:46:23,639 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T11:46:23,639 DEBUG [RS:2;ef751fafe6b1:37723 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef751fafe6b1,37723,1733831182165 2024-12-10T11:46:23,639 DEBUG [RS:1;ef751fafe6b1:43719 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef751fafe6b1,43719,1733831182093 2024-12-10T11:46:23,639 DEBUG [RS:0;ef751fafe6b1:36793 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef751fafe6b1,36793,1733831181979 2024-12-10T11:46:23,640 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef751fafe6b1,37723,1733831182165' 2024-12-10T11:46:23,640 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef751fafe6b1,36793,1733831181979' 2024-12-10T11:46:23,640 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef751fafe6b1,43719,1733831182093' 2024-12-10T11:46:23,640 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T11:46:23,640 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T11:46:23,640 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T11:46:23,641 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T11:46:23,641 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T11:46:23,641 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T11:46:23,641 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T11:46:23,641 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.RegionServerProcedureManagerHost(51): Procedure 
online-snapshot starting 2024-12-10T11:46:23,641 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T11:46:23,641 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T11:46:23,641 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T11:46:23,642 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T11:46:23,642 DEBUG [RS:1;ef751fafe6b1:43719 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef751fafe6b1,43719,1733831182093 2024-12-10T11:46:23,642 DEBUG [RS:0;ef751fafe6b1:36793 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef751fafe6b1,36793,1733831181979 2024-12-10T11:46:23,642 DEBUG [RS:2;ef751fafe6b1:37723 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef751fafe6b1,37723,1733831182165 2024-12-10T11:46:23,642 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef751fafe6b1,43719,1733831182093' 2024-12-10T11:46:23,642 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef751fafe6b1,36793,1733831181979' 2024-12-10T11:46:23,642 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef751fafe6b1,37723,1733831182165' 2024-12-10T11:46:23,642 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T11:46:23,642 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T11:46:23,642 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T11:46:23,642 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T11:46:23,642 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T11:46:23,642 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T11:46:23,643 DEBUG [RS:1;ef751fafe6b1:43719 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T11:46:23,643 INFO [RS:1;ef751fafe6b1:43719 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T11:46:23,643 DEBUG [RS:2;ef751fafe6b1:37723 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T11:46:23,643 INFO [RS:2;ef751fafe6b1:37723 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T11:46:23,643 INFO [RS:1;ef751fafe6b1:43719 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T11:46:23,643 DEBUG [RS:0;ef751fafe6b1:36793 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T11:46:23,643 INFO [RS:2;ef751fafe6b1:37723 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-10T11:46:23,643 INFO [RS:0;ef751fafe6b1:36793 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T11:46:23,643 INFO [RS:0;ef751fafe6b1:36793 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T11:46:23,753 INFO [RS:0;ef751fafe6b1:36793 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T11:46:23,753 INFO [RS:1;ef751fafe6b1:43719 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T11:46:23,753 INFO [RS:2;ef751fafe6b1:37723 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T11:46:23,756 INFO [RS:0;ef751fafe6b1:36793 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef751fafe6b1%2C36793%2C1733831181979, suffix=, logDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,36793,1733831181979, archiveDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/oldWALs, maxLogs=32 2024-12-10T11:46:23,756 INFO [RS:1;ef751fafe6b1:43719 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef751fafe6b1%2C43719%2C1733831182093, suffix=, logDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,43719,1733831182093, archiveDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/oldWALs, maxLogs=32 2024-12-10T11:46:23,756 INFO [RS:2;ef751fafe6b1:37723 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef751fafe6b1%2C37723%2C1733831182165, suffix=, logDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,37723,1733831182165, archiveDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/oldWALs, maxLogs=32 2024-12-10T11:46:23,772 DEBUG [RS:0;ef751fafe6b1:36793 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,36793,1733831181979/ef751fafe6b1%2C36793%2C1733831181979.1733831183760, exclude list is [], retry=0 2024-12-10T11:46:23,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43797,DS-55f19d74-cb7e-4695-93f8-9554f6e22f52,DISK] 2024-12-10T11:46:23,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33591,DS-d3c7f28e-8dd8-4a16-81c0-9f69b8e63380,DISK] 2024-12-10T11:46:23,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38893,DS-2f7c98c9-d0e7-4c9f-87fa-2c4211e06011,DISK] 2024-12-10T11:46:23,804 DEBUG [RS:1;ef751fafe6b1:43719 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,43719,1733831182093/ef751fafe6b1%2C43719%2C1733831182093.1733831183760, exclude list is [], retry=0 2024-12-10T11:46:23,804 DEBUG [RS:2;ef751fafe6b1:37723 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,37723,1733831182165/ef751fafe6b1%2C37723%2C1733831182165.1733831183760, exclude list is [], retry=0 2024-12-10T11:46:23,807 INFO [RS:0;ef751fafe6b1:36793 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,36793,1733831181979/ef751fafe6b1%2C36793%2C1733831181979.1733831183760 2024-12-10T11:46:23,808 DEBUG [RS:0;ef751fafe6b1:36793 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35301:35301),(127.0.0.1/127.0.0.1:34881:34881),(127.0.0.1/127.0.0.1:43525:43525)] 2024-12-10T11:46:23,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43797,DS-55f19d74-cb7e-4695-93f8-9554f6e22f52,DISK] 2024-12-10T11:46:23,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38893,DS-2f7c98c9-d0e7-4c9f-87fa-2c4211e06011,DISK] 2024-12-10T11:46:23,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33591,DS-d3c7f28e-8dd8-4a16-81c0-9f69b8e63380,DISK] 2024-12-10T11:46:23,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33591,DS-d3c7f28e-8dd8-4a16-81c0-9f69b8e63380,DISK] 2024-12-10T11:46:23,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43797,DS-55f19d74-cb7e-4695-93f8-9554f6e22f52,DISK] 2024-12-10T11:46:23,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38893,DS-2f7c98c9-d0e7-4c9f-87fa-2c4211e06011,DISK] 2024-12-10T11:46:23,819 INFO [RS:2;ef751fafe6b1:37723 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,37723,1733831182165/ef751fafe6b1%2C37723%2C1733831182165.1733831183760 2024-12-10T11:46:23,819 INFO [RS:1;ef751fafe6b1:43719 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,43719,1733831182093/ef751fafe6b1%2C43719%2C1733831182093.1733831183760 2024-12-10T11:46:23,819 DEBUG [RS:2;ef751fafe6b1:37723 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:35301:35301),(127.0.0.1/127.0.0.1:43525:43525),(127.0.0.1/127.0.0.1:34881:34881)] 2024-12-10T11:46:23,820 DEBUG [RS:1;ef751fafe6b1:43719 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34881:34881),(127.0.0.1/127.0.0.1:43525:43525),(127.0.0.1/127.0.0.1:35301:35301)] 2024-12-10T11:46:23,844 DEBUG [ef751fafe6b1:40549 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-10T11:46:23,851 DEBUG [ef751fafe6b1:40549 {}] balancer.BalancerClusterState(204): Hosts are {ef751fafe6b1=0} racks are {/default-rack=0} 2024-12-10T11:46:23,857 DEBUG [ef751fafe6b1:40549 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T11:46:23,857 DEBUG [ef751fafe6b1:40549 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T11:46:23,857 DEBUG [ef751fafe6b1:40549 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T11:46:23,857 DEBUG [ef751fafe6b1:40549 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T11:46:23,857 DEBUG [ef751fafe6b1:40549 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T11:46:23,857 DEBUG [ef751fafe6b1:40549 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T11:46:23,857 INFO [ef751fafe6b1:40549 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T11:46:23,857 INFO [ef751fafe6b1:40549 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T11:46:23,857 INFO [ef751fafe6b1:40549 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T11:46:23,857 DEBUG [ef751fafe6b1:40549 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T11:46:23,864 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef751fafe6b1,43719,1733831182093 2024-12-10T11:46:23,869 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef751fafe6b1,43719,1733831182093, state=OPENING 2024-12-10T11:46:23,904 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-10T11:46:23,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,913 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:23,913 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:23,913 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:23,913 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:23,914 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:23,915 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T11:46:23,917 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef751fafe6b1,43719,1733831182093}] 2024-12-10T11:46:24,108 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T11:46:24,110 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37591, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T11:46:24,122 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-10T11:46:24,122 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T11:46:24,123 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-10T11:46:24,127 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef751fafe6b1%2C43719%2C1733831182093.meta, suffix=.meta, logDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,43719,1733831182093, archiveDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/oldWALs, maxLogs=32 2024-12-10T11:46:24,148 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,43719,1733831182093/ef751fafe6b1%2C43719%2C1733831182093.meta.1733831184128.meta, exclude list is [], retry=0 2024-12-10T11:46:24,153 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33591,DS-d3c7f28e-8dd8-4a16-81c0-9f69b8e63380,DISK] 2024-12-10T11:46:24,153 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43797,DS-55f19d74-cb7e-4695-93f8-9554f6e22f52,DISK] 2024-12-10T11:46:24,153 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38893,DS-2f7c98c9-d0e7-4c9f-87fa-2c4211e06011,DISK] 2024-12-10T11:46:24,156 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,43719,1733831182093/ef751fafe6b1%2C43719%2C1733831182093.meta.1733831184128.meta 2024-12-10T11:46:24,157 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34881:34881),(127.0.0.1/127.0.0.1:35301:35301),(127.0.0.1/127.0.0.1:43525:43525)] 2024-12-10T11:46:24,157 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-10T11:46:24,158 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-10T11:46:24,160 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-10T11:46:24,164 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-10T11:46:24,168 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-10T11:46:24,168 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:46:24,168 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-10T11:46:24,168 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-10T11:46:24,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T11:46:24,174 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T11:46:24,174 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:24,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:24,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T11:46:24,176 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T11:46:24,176 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:24,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:24,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T11:46:24,179 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T11:46:24,180 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:24,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:24,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T11:46:24,182 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T11:46:24,182 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:24,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
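The StoreOpener entries above show hbase:meta being opened with four column families (info, ns, rep_barrier, table), each using ROW_INDEX_V1 block encoding and the default store file tracker. As a hedged sketch, separate from this test's code, of how those same family attributes could be inspected through the public HBase client API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class MetaFamiliesSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
            for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
                // Mirrors the attributes logged by the store opener: family name, encoding, block size.
                System.out.println(cf.getNameAsString() + " encoding=" + cf.getDataBlockEncoding()
                    + " blocksize=" + cf.getBlocksize());
            }
        }
    }
}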
2024-12-10T11:46:24,183 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T11:46:24,185 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740 2024-12-10T11:46:24,187 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740 2024-12-10T11:46:24,189 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T11:46:24,189 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T11:46:24,189 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T11:46:24,191 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T11:46:24,193 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60766444, jitterRate=-0.09450942277908325}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T11:46:24,193 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-10T11:46:24,194 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733831184169Writing region info on filesystem at 1733831184169Initializing all the Stores at 1733831184170 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831184171 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831184172 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733831184172Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831184172Cleaning up temporary data from old regions at 1733831184189 (+17 ms)Running coprocessor post-open hooks at 1733831184193 (+4 ms)Region opened successfully at 1733831184194 (+1 ms) 2024-12-10T11:46:24,200 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733831184084 2024-12-10T11:46:24,211 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-10T11:46:24,211 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-10T11:46:24,213 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ef751fafe6b1,43719,1733831182093 2024-12-10T11:46:24,215 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef751fafe6b1,43719,1733831182093, state=OPEN 2024-12-10T11:46:24,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:46:24,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:46:24,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:46:24,238 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:46:24,238 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:24,238 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:24,238 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:24,238 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:24,239 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef751fafe6b1,43719,1733831182093 2024-12-10T11:46:24,247 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-10T11:46:24,247 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef751fafe6b1,43719,1733831182093 in 323 msec 2024-12-10T11:46:24,254 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-10T11:46:24,254 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 819 msec 2024-12-10T11:46:24,256 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T11:46:24,256 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-10T11:46:24,272 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T11:46:24,273 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef751fafe6b1,43719,1733831182093, seqNum=-1] 2024-12-10T11:46:24,289 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T11:46:24,291 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33217, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T11:46:24,308 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0870 sec 2024-12-10T11:46:24,308 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733831184308, completionTime=-1 2024-12-10T11:46:24,311 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-10T11:46:24,311 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-10T11:46:24,333 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-10T11:46:24,333 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733831244333 2024-12-10T11:46:24,333 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733831304333 2024-12-10T11:46:24,333 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 22 msec 2024-12-10T11:46:24,334 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-10T11:46:24,340 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,40549,1733831181287-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:24,340 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,40549,1733831181287-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:24,340 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,40549,1733831181287-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:24,341 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef751fafe6b1:40549, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:24,341 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:24,342 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:24,347 DEBUG [master/ef751fafe6b1:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-10T11:46:24,367 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.056sec 2024-12-10T11:46:24,368 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-10T11:46:24,369 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-10T11:46:24,370 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-10T11:46:24,370 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-10T11:46:24,371 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-10T11:46:24,372 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,40549,1733831181287-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:46:24,372 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,40549,1733831181287-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-10T11:46:24,376 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-10T11:46:24,377 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-10T11:46:24,377 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,40549,1733831181287-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:24,426 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ad5c43f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T11:46:24,430 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-10T11:46:24,431 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-10T11:46:24,435 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ef751fafe6b1,40549,-1 for getting cluster id 2024-12-10T11:46:24,437 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-10T11:46:24,447 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ebf1fa2c-a6ab-4b5f-8dca-c14901266ac9' 2024-12-10T11:46:24,449 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-10T11:46:24,450 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ebf1fa2c-a6ab-4b5f-8dca-c14901266ac9" 2024-12-10T11:46:24,452 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f935e1f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T11:46:24,452 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ef751fafe6b1,40549,-1] 2024-12-10T11:46:24,455 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-10T11:46:24,457 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:24,458 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60468, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-10T11:46:24,460 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b1ff74e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T11:46:24,461 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T11:46:24,467 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef751fafe6b1,43719,1733831182093, seqNum=-1] 2024-12-10T11:46:24,468 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T11:46:24,470 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58512, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T11:46:24,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=ef751fafe6b1,40549,1733831181287 2024-12-10T11:46:24,490 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-10T11:46:24,494 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is ef751fafe6b1,40549,1733831181287 2024-12-10T11:46:24,495 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@517aaee6 2024-12-10T11:46:24,496 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T11:46:24,498 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60484, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T11:46:24,503 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T11:46:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-10T11:46:24,513 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T11:46:24,515 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-10T11:46:24,516 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:24,519 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T11:46:24,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:46:24,527 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:24,527 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:24,530 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-884346255_22 at /127.0.0.1:49708 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:38893:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49708 dst: /127.0.0.1:38893 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:24,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-10T11:46:24,536 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T11:46:24,538 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 07cec63129f27f8febb1b3fad8c66956, NAME => 'TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d 2024-12-10T11:46:24,544 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:24,544 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:24,549 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-884346255_22 at /127.0.0.1:50182 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:43797:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50182 dst: /127.0.0.1:43797 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:24,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-10T11:46:24,555 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
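The DFSStripedOutputStream warnings above are the expected consequence of writing with the RS-3-2-1024k policy on a 3-datanode minicluster: that policy needs 5 block locations (3 data + 2 parity), so parity blocks at indexes 3 and 4 cannot be placed and each block group is written degraded. The log itself points at 'hdfs ec -verifyClusterSetup' for checking this. Below is a hedged sketch of the equivalent checks and policy assignment through the HDFS Java API; the NameNode address, WAL directory, and policy name are copied from the log, and this is not code from the test itself.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EcPolicySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address copied from the WAL configuration entries earlier in the log.
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:45975"), conf);

        // RS-3-2-1024k is a built-in policy but not enabled by default; enable it, then apply it.
        dfs.enableErasureCodingPolicy("RS-3-2-1024k");
        Path walDir = new Path("/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs");
        dfs.setErasureCodingPolicy(walDir, "RS-3-2-1024k");

        // Reports which policy is in effect for the directory (null means plain replication).
        System.out.println(dfs.getErasureCodingPolicy(walDir));
    }
}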
2024-12-10T11:46:24,555 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:46:24,555 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 07cec63129f27f8febb1b3fad8c66956, disabling compactions & flushes 2024-12-10T11:46:24,556 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 2024-12-10T11:46:24,556 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 2024-12-10T11:46:24,556 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. after waiting 0 ms 2024-12-10T11:46:24,556 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 2024-12-10T11:46:24,556 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 2024-12-10T11:46:24,556 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 07cec63129f27f8febb1b3fad8c66956: Waiting for close lock at 1733831184555Disabling compacts and flushes for region at 1733831184555Disabling writes for close at 1733831184556 (+1 ms)Writing region close event to WAL at 1733831184556Closed at 1733831184556 2024-12-10T11:46:24,558 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T11:46:24,563 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733831184558"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733831184558"}]},"ts":"1733831184558"} 2024-12-10T11:46:24,567 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
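[Editor's note] The CREATE_TABLE_ADD_TO_META step above writes the new region's row into hbase:meta with the info:regioninfo and info:state qualifiers (both visible in the Put JSON). As a hedged illustration only, that row can be read back through an ordinary client Get against the meta table; the row key below is copied from the Put, and how the state value is encoded is an assumption, so it is merely printed.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadMetaRow {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
            // Row key exactly as printed in the Put above.
            byte[] row = Bytes.toBytes(
                "TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956.");
            Result r = meta.get(new Get(row).addFamily(Bytes.toBytes("info")));
            // info:state holds the region state written by the master; shown here as raw text.
            System.out.println("info:state = "
                + Bytes.toString(r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"))));
        }
    }
}
```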
2024-12-10T11:46:24,569 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T11:46:24,571 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733831184569"}]},"ts":"1733831184569"} 2024-12-10T11:46:24,575 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-10T11:46:24,576 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {ef751fafe6b1=0} racks are {/default-rack=0} 2024-12-10T11:46:24,577 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T11:46:24,578 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T11:46:24,578 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T11:46:24,578 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T11:46:24,578 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T11:46:24,578 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T11:46:24,578 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T11:46:24,578 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T11:46:24,578 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T11:46:24,578 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T11:46:24,580 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=07cec63129f27f8febb1b3fad8c66956, ASSIGN}] 2024-12-10T11:46:24,583 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=07cec63129f27f8febb1b3fad8c66956, ASSIGN 2024-12-10T11:46:24,585 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=07cec63129f27f8febb1b3fad8c66956, ASSIGN; state=OFFLINE, location=ef751fafe6b1,43719,1733831182093; forceNewPlan=false, retain=false 2024-12-10T11:46:24,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:46:24,739 INFO [ef751fafe6b1:40549 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-10T11:46:24,741 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=07cec63129f27f8febb1b3fad8c66956, regionState=OPENING, regionLocation=ef751fafe6b1,43719,1733831182093 2024-12-10T11:46:24,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=07cec63129f27f8febb1b3fad8c66956, ASSIGN because future has completed 2024-12-10T11:46:24,750 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 07cec63129f27f8febb1b3fad8c66956, server=ef751fafe6b1,43719,1733831182093}] 2024-12-10T11:46:24,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:46:24,912 INFO [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 2024-12-10T11:46:24,912 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 07cec63129f27f8febb1b3fad8c66956, NAME => 'TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956.', STARTKEY => '', ENDKEY => ''} 2024-12-10T11:46:24,913 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 07cec63129f27f8febb1b3fad8c66956 2024-12-10T11:46:24,913 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:46:24,913 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 07cec63129f27f8febb1b3fad8c66956 2024-12-10T11:46:24,913 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 07cec63129f27f8febb1b3fad8c66956 2024-12-10T11:46:24,916 INFO [StoreOpener-07cec63129f27f8febb1b3fad8c66956-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 07cec63129f27f8febb1b3fad8c66956 2024-12-10T11:46:24,918 INFO [StoreOpener-07cec63129f27f8febb1b3fad8c66956-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 07cec63129f27f8febb1b3fad8c66956 columnFamilyName cf 2024-12-10T11:46:24,918 DEBUG [StoreOpener-07cec63129f27f8febb1b3fad8c66956-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:24,919 INFO [StoreOpener-07cec63129f27f8febb1b3fad8c66956-1 {}] regionserver.HStore(327): Store=07cec63129f27f8febb1b3fad8c66956/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:46:24,919 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 07cec63129f27f8febb1b3fad8c66956 2024-12-10T11:46:24,920 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/default/TestHBaseWalOnEC/07cec63129f27f8febb1b3fad8c66956 2024-12-10T11:46:24,921 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/default/TestHBaseWalOnEC/07cec63129f27f8febb1b3fad8c66956 2024-12-10T11:46:24,921 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 07cec63129f27f8febb1b3fad8c66956 2024-12-10T11:46:24,921 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 07cec63129f27f8febb1b3fad8c66956 2024-12-10T11:46:24,924 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 07cec63129f27f8febb1b3fad8c66956 2024-12-10T11:46:24,929 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/default/TestHBaseWalOnEC/07cec63129f27f8febb1b3fad8c66956/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T11:46:24,930 INFO [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 07cec63129f27f8febb1b3fad8c66956; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68545231, jitterRate=0.021403536200523376}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T11:46:24,930 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 07cec63129f27f8febb1b3fad8c66956 2024-12-10T11:46:24,931 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 07cec63129f27f8febb1b3fad8c66956: Running coprocessor pre-open hook at 1733831184913Writing region info on filesystem at 1733831184913Initializing all the Stores at 1733831184915 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733831184915Cleaning up temporary data from old regions at 1733831184922 (+7 ms)Running coprocessor post-open hooks at 1733831184930 (+8 ms)Region opened successfully at 1733831184931 (+1 ms) 2024-12-10T11:46:24,932 INFO [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956., pid=6, masterSystemTime=1733831184904 2024-12-10T11:46:24,935 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 2024-12-10T11:46:24,935 INFO [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 2024-12-10T11:46:24,936 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=07cec63129f27f8febb1b3fad8c66956, regionState=OPEN, openSeqNum=2, regionLocation=ef751fafe6b1,43719,1733831182093 2024-12-10T11:46:24,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 07cec63129f27f8febb1b3fad8c66956, server=ef751fafe6b1,43719,1733831182093 because future has completed 2024-12-10T11:46:24,945 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-10T11:46:24,945 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 07cec63129f27f8febb1b3fad8c66956, server=ef751fafe6b1,43719,1733831182093 in 193 msec 2024-12-10T11:46:24,948 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-10T11:46:24,948 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=07cec63129f27f8febb1b3fad8c66956, ASSIGN in 365 msec 2024-12-10T11:46:24,950 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T11:46:24,950 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733831184950"}]},"ts":"1733831184950"} 2024-12-10T11:46:24,953 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-10T11:46:24,954 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T11:46:24,957 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 448 msec 
2024-12-10T11:46:25,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:46:25,147 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-10T11:46:25,148 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-10T11:46:25,150 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T11:46:25,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-10T11:46:25,159 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T11:46:25,160 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-10T11:46:25,169 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956., hostname=ef751fafe6b1,43719,1733831182093, seqNum=2] 2024-12-10T11:46:25,183 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-10T11:46:25,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-10T11:46:25,191 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-10T11:46:25,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:46:25,193 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T11:46:25,195 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T11:46:25,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:46:25,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43719 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-10T11:46:25,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 
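[Editor's note] The FlushTableProcedure stored above (pid=7, with FlushRegionProcedure pid=8) covers a single 32 B edit; the HFile key printed a few entries later ("row/cf:cq/1733831185171/Put") shows it was one put of column cf:cq on row "row". A minimal client-side sketch of that put-then-flush sequence, assuming the standard Table/Admin API; the value bytes are not visible in the log, so the one used here is a placeholder.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlush {
    public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
            // One cell on row "row", family "cf", qualifier "cq"; the value is a placeholder.
            table.put(new Put(Bytes.toBytes("row"))
                .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
            // Submits the flush seen above as pid=7 and waits for the procedure to finish,
            // which matches the "Checking to see if procedure is done pid=7" polling in the log.
            admin.flush(name);
        }
    }
}
```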
2024-12-10T11:46:25,366 INFO [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 07cec63129f27f8febb1b3fad8c66956 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-10T11:46:25,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/default/TestHBaseWalOnEC/07cec63129f27f8febb1b3fad8c66956/.tmp/cf/efda442ba3f449aaaa59690273da1450 is 36, key is row/cf:cq/1733831185171/Put/seqid=0 2024-12-10T11:46:25,424 WARN [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:25,425 WARN [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:25,428 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1346975461_22 at /127.0.0.1:49720 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:38893:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49720 dst: /127.0.0.1:38893 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:25,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-10T11:46:25,435 WARN [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T11:46:25,435 INFO [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/default/TestHBaseWalOnEC/07cec63129f27f8febb1b3fad8c66956/.tmp/cf/efda442ba3f449aaaa59690273da1450 2024-12-10T11:46:25,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/default/TestHBaseWalOnEC/07cec63129f27f8febb1b3fad8c66956/.tmp/cf/efda442ba3f449aaaa59690273da1450 as hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/default/TestHBaseWalOnEC/07cec63129f27f8febb1b3fad8c66956/cf/efda442ba3f449aaaa59690273da1450 2024-12-10T11:46:25,482 INFO [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/default/TestHBaseWalOnEC/07cec63129f27f8febb1b3fad8c66956/cf/efda442ba3f449aaaa59690273da1450, entries=1, sequenceid=5, filesize=4.7 K 2024-12-10T11:46:25,490 INFO [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 07cec63129f27f8febb1b3fad8c66956 in 121ms, sequenceid=5, compaction requested=false 2024-12-10T11:46:25,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-10T11:46:25,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 07cec63129f27f8febb1b3fad8c66956: 2024-12-10T11:46:25,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 
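[Editor's note] After the commit above, the single cell lives in store file efda442ba3f449aaaa59690273da1450 (entries=1, sequenceid=5, ~4.7 K). A short hedged read-back sketch using the same row/family/qualifier shown in the flush entries; once the memstore is flushed, this Get is served from that new store file.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadBack {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestHBaseWalOnEC"))) {
            Result r = table.get(new Get(Bytes.toBytes("row")));
            byte[] v = r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("cq"));
            System.out.println("cf:cq = " + (v == null ? "<missing>" : Bytes.toString(v)));
        }
    }
}
```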
2024-12-10T11:46:25,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-10T11:46:25,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-10T11:46:25,503 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-10T11:46:25,503 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 303 msec 2024-12-10T11:46:25,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:46:25,507 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 319 msec 2024-12-10T11:46:25,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:46:25,816 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-10T11:46:25,830 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-10T11:46:25,831 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-10T11:46:25,831 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:46:25,834 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:25,835 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:25,835 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
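[Editor's note] The call stack above shows the JUnit @After hook (TestHBaseWalOnEC.tearDown, line 101) driving HBaseTestingUtil.shutdownMiniCluster(), which first closes the shared async connection (HBaseTestingUtil.closeConnection via cleanup) before stopping the master and region servers. A hedged sketch of that scaffolding is below; only shutdownMiniCluster() itself is visible in the trace, so the field name, the @Before hook, and the startMiniCluster(int) overload are assumptions.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterScaffoldSketch {
    // Hypothetical field; the real test's setup is not shown in this log.
    private final HBaseTestingUtil util = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
        // Assumed convenience overload: start a mini cluster with three region servers,
        // matching the three RS instances (36793, 43719, 37723) seen in this log.
        util.startMiniCluster(3);
    }

    @After
    public void tearDown() throws Exception {
        // The call visible at TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) above.
        util.shutdownMiniCluster();
    }

    @Test
    public void placeholder() {
        // The real test exercises a WAL written to an erasure-coded HDFS directory.
    }
}
```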
2024-12-10T11:46:25,835 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-10T11:46:25,835 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=887671035, stopped=false 2024-12-10T11:46:25,835 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef751fafe6b1,40549,1733831181287 2024-12-10T11:46:25,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:25,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:25,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:25,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:25,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:25,896 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:25,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:25,896 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:25,896 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T11:46:25,897 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-10T11:46:25,897 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:46:25,897 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:25,897 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:25,898 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:25,898 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:25,898 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:25,899 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef751fafe6b1,36793,1733831181979' ***** 2024-12-10T11:46:25,899 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef751fafe6b1,36793,1733831181979' ***** 2024-12-10T11:46:25,899 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T11:46:25,899 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(2210): STOPPED: Exiting; cluster shutdown set and not carrying any regions 2024-12-10T11:46:25,899 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef751fafe6b1,43719,1733831182093' ***** 2024-12-10T11:46:25,899 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T11:46:25,899 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef751fafe6b1,37723,1733831182165' ***** 2024-12-10T11:46:25,899 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(2210): STOPPED: Exiting; cluster shutdown set and not carrying any regions 2024-12-10T11:46:25,900 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T11:46:25,901 INFO [RS:1;ef751fafe6b1:43719 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T11:46:25,901 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T11:46:25,901 INFO [RS:1;ef751fafe6b1:43719 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-10T11:46:25,901 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(3091): Received CLOSE for 07cec63129f27f8febb1b3fad8c66956 2024-12-10T11:46:25,902 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(959): stopping server ef751fafe6b1,43719,1733831182093 2024-12-10T11:46:25,902 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:46:25,902 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T11:46:25,902 INFO [RS:1;ef751fafe6b1:43719 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;ef751fafe6b1:43719. 2024-12-10T11:46:25,902 INFO [RS:0;ef751fafe6b1:36793 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T11:46:25,902 DEBUG [RS:1;ef751fafe6b1:43719 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:46:25,902 INFO [RS:0;ef751fafe6b1:36793 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T11:46:25,902 DEBUG [RS:1;ef751fafe6b1:43719 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:25,902 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(959): stopping server ef751fafe6b1,36793,1733831181979 2024-12-10T11:46:25,902 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T11:46:25,902 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:46:25,902 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 07cec63129f27f8febb1b3fad8c66956, disabling compactions & flushes 2024-12-10T11:46:25,902 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T11:46:25,902 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T11:46:25,903 INFO [RS:0;ef751fafe6b1:36793 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef751fafe6b1:36793. 
2024-12-10T11:46:25,903 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T11:46:25,903 INFO [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 2024-12-10T11:46:25,903 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T11:46:25,903 DEBUG [RS:0;ef751fafe6b1:36793 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:46:25,903 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T11:46:25,903 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 2024-12-10T11:46:25,903 DEBUG [RS:0;ef751fafe6b1:36793 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:25,903 INFO [RS:2;ef751fafe6b1:37723 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T11:46:25,903 INFO [RS:2;ef751fafe6b1:37723 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T11:46:25,903 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. after waiting 0 ms 2024-12-10T11:46:25,903 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-10T11:46:25,903 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(959): stopping server ef751fafe6b1,37723,1733831182165 2024-12-10T11:46:25,903 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 
2024-12-10T11:46:25,903 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:46:25,903 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(976): stopping server ef751fafe6b1,36793,1733831181979; all regions closed. 2024-12-10T11:46:25,903 INFO [RS:2;ef751fafe6b1:37723 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;ef751fafe6b1:37723. 2024-12-10T11:46:25,903 DEBUG [RS:2;ef751fafe6b1:37723 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:46:25,903 DEBUG [RS:2;ef751fafe6b1:37723 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:25,903 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(976): stopping server ef751fafe6b1,37723,1733831182165; all regions closed. 
2024-12-10T11:46:25,904 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-10T11:46:25,904 DEBUG [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 07cec63129f27f8febb1b3fad8c66956=TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956.} 2024-12-10T11:46:25,904 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T11:46:25,904 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T11:46:25,904 DEBUG [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(1351): Waiting on 07cec63129f27f8febb1b3fad8c66956, 1588230740 2024-12-10T11:46:25,904 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T11:46:25,904 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T11:46:25,904 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T11:46:25,905 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-10T11:46:25,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_1073741828_1018 (size=93) 2024-12-10T11:46:25,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741828_1018 (size=93) 2024-12-10T11:46:25,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_1073741828_1018 (size=93) 2024-12-10T11:46:25,914 DEBUG [RS:2;ef751fafe6b1:37723 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/oldWALs 2024-12-10T11:46:25,915 INFO [RS:2;ef751fafe6b1:37723 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef751fafe6b1%2C37723%2C1733831182165:(num 1733831183760) 2024-12-10T11:46:25,915 DEBUG [RS:2;ef751fafe6b1:37723 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:25,915 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:46:25,915 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T11:46:25,915 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.ChoreService(370): Chore service for: regionserver/ef751fafe6b1:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T11:46:25,915 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T11:46:25,915 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
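[Editor's note] The AsyncFSWAL files being moved to oldWALs above, together with the test name and the recurring RS-3-2-1024k warnings, indicate the WAL directory sits under a path with an erasure coding policy applied. The exact path and mechanism the test uses are not visible in this log; the sketch below only shows, under that assumption, how such a policy is enabled and applied with the HDFS client (RS-3-2-1024k ships with Hadoop but may need to be enabled first).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ApplyWalEcPolicy {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf)) {
            // Make the built-in policy usable on this cluster.
            dfs.enableErasureCodingPolicy("RS-3-2-1024k");
            // Illustrative directory: in this log the WALs live under
            // /user/jenkins/test-data/.../WALs; the path the test actually uses is not shown.
            dfs.setErasureCodingPolicy(new Path("/user/jenkins/test-data"), "RS-3-2-1024k");
        }
    }
}
```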
2024-12-10T11:46:25,915 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T11:46:25,916 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T11:46:25,916 INFO [RS:2;ef751fafe6b1:37723 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37723 2024-12-10T11:46:25,917 INFO [regionserver/ef751fafe6b1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T11:46:25,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_1073741826_1016 (size=93) 2024-12-10T11:46:25,918 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/WALs/ef751fafe6b1,36793,1733831181979/ef751fafe6b1%2C36793%2C1733831181979.1733831183760 not finished, retry = 0 2024-12-10T11:46:25,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741826_1016 (size=93) 2024-12-10T11:46:25,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_1073741826_1016 (size=93) 2024-12-10T11:46:25,928 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/default/TestHBaseWalOnEC/07cec63129f27f8febb1b3fad8c66956/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-10T11:46:25,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T11:46:25,929 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T11:46:25,930 INFO [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 2024-12-10T11:46:25,930 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 07cec63129f27f8febb1b3fad8c66956: Waiting for close lock at 1733831185902Running coprocessor pre-close hooks at 1733831185902Disabling compacts and flushes for region at 1733831185902Disabling writes for close at 1733831185903 (+1 ms)Writing region close event to WAL at 1733831185904 (+1 ms)Running coprocessor post-close hooks at 1733831185929 (+25 ms)Closed at 1733831185930 (+1 ms) 2024-12-10T11:46:25,931 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956. 
2024-12-10T11:46:25,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef751fafe6b1,37723,1733831182165 2024-12-10T11:46:25,938 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef751fafe6b1,37723,1733831182165] 2024-12-10T11:46:25,943 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/.tmp/info/5378839200b2434d96217a4dc2d7570f is 153, key is TestHBaseWalOnEC,,1733831184500.07cec63129f27f8febb1b3fad8c66956./info:regioninfo/1733831184936/Put/seqid=0 2024-12-10T11:46:25,945 WARN [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:25,945 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef751fafe6b1,37723,1733831182165 already deleted, retry=false 2024-12-10T11:46:25,946 WARN [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:25,946 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef751fafe6b1,37723,1733831182165 expired; onlineServers=2 2024-12-10T11:46:25,949 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1346975461_22 at /127.0.0.1:50196 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:43797:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50196 dst: /127.0.0.1:43797 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:25,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-10T11:46:25,953 WARN [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:46:25,953 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/.tmp/info/5378839200b2434d96217a4dc2d7570f 2024-12-10T11:46:25,978 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/.tmp/ns/c6d29cb2d8d24d1a80dc79f05ddec4c0 is 43, key is default/ns:d/1733831184294/Put/seqid=0 2024-12-10T11:46:25,980 WARN [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:25,980 WARN [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:25,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1346975461_22 at /127.0.0.1:47064 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:33591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47064 dst: /127.0.0.1:33591 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:25,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-10T11:46:25,990 WARN [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:46:25,990 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/.tmp/ns/c6d29cb2d8d24d1a80dc79f05ddec4c0 2024-12-10T11:46:25,991 INFO [regionserver/ef751fafe6b1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:46:25,992 INFO [regionserver/ef751fafe6b1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:46:25,993 INFO [regionserver/ef751fafe6b1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:46:26,015 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/.tmp/table/38cf8c1362214bc2afe42c2f29e9ddd4 is 52, key is TestHBaseWalOnEC/table:state/1733831184950/Put/seqid=0 2024-12-10T11:46:26,018 WARN [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:26,018 WARN [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:26,021 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1346975461_22 at /127.0.0.1:50220 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:43797:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50220 dst: /127.0.0.1:43797 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:26,022 DEBUG [RS:0;ef751fafe6b1:36793 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/oldWALs 2024-12-10T11:46:26,022 INFO [RS:0;ef751fafe6b1:36793 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef751fafe6b1%2C36793%2C1733831181979:(num 1733831183760) 2024-12-10T11:46:26,022 DEBUG [RS:0;ef751fafe6b1:36793 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:26,022 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:46:26,022 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T11:46:26,022 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.ChoreService(370): Chore service for: regionserver/ef751fafe6b1:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T11:46:26,022 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T11:46:26,022 INFO [regionserver/ef751fafe6b1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T11:46:26,022 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T11:46:26,023 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T11:46:26,023 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T11:46:26,023 INFO [RS:0;ef751fafe6b1:36793 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36793 2024-12-10T11:46:26,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-10T11:46:26,025 WARN [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T11:46:26,025 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/.tmp/table/38cf8c1362214bc2afe42c2f29e9ddd4 2024-12-10T11:46:26,035 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/.tmp/info/5378839200b2434d96217a4dc2d7570f as hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/info/5378839200b2434d96217a4dc2d7570f 2024-12-10T11:46:26,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:26,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37723-0x1000f99ff790003, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:26,038 INFO [RS:2;ef751fafe6b1:37723 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T11:46:26,038 INFO [RS:2;ef751fafe6b1:37723 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef751fafe6b1,37723,1733831182165; zookeeper connection closed. 2024-12-10T11:46:26,039 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@310702d6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@310702d6 2024-12-10T11:46:26,045 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/info/5378839200b2434d96217a4dc2d7570f, entries=10, sequenceid=11, filesize=6.5 K 2024-12-10T11:46:26,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef751fafe6b1,36793,1733831181979 2024-12-10T11:46:26,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T11:46:26,046 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T11:46:26,046 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef751fafe6b1,36793,1733831181979] 2024-12-10T11:46:26,047 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/.tmp/ns/c6d29cb2d8d24d1a80dc79f05ddec4c0 as hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/ns/c6d29cb2d8d24d1a80dc79f05ddec4c0 2024-12-10T11:46:26,058 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/ns/c6d29cb2d8d24d1a80dc79f05ddec4c0, entries=2, sequenceid=11, filesize=5.0 K 2024-12-10T11:46:26,060 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/.tmp/table/38cf8c1362214bc2afe42c2f29e9ddd4 as hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/table/38cf8c1362214bc2afe42c2f29e9ddd4 2024-12-10T11:46:26,062 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef751fafe6b1,36793,1733831181979 already deleted, retry=false 2024-12-10T11:46:26,062 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef751fafe6b1,36793,1733831181979 expired; onlineServers=1 2024-12-10T11:46:26,071 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/table/38cf8c1362214bc2afe42c2f29e9ddd4, entries=2, sequenceid=11, filesize=5.1 K 2024-12-10T11:46:26,073 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 168ms, sequenceid=11, compaction requested=false 2024-12-10T11:46:26,073 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-10T11:46:26,081 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-10T11:46:26,082 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T11:46:26,082 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T11:46:26,082 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733831185904Running coprocessor pre-close hooks at 1733831185904Disabling compacts and flushes for region at 1733831185904Disabling writes for close at 1733831185904Obtaining lock to block concurrent updates at 1733831185905 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733831185905Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733831185905Flushing stores of hbase:meta,,1.1588230740 at 1733831185906 (+1 ms)Flushing 1588230740/info: creating writer at 1733831185907 (+1 ms)Flushing 1588230740/info: appending metadata at 1733831185940 (+33 ms)Flushing 1588230740/info: closing flushed file at 1733831185940Flushing 1588230740/ns: creating writer at 
1733831185963 (+23 ms)Flushing 1588230740/ns: appending metadata at 1733831185977 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733831185977Flushing 1588230740/table: creating writer at 1733831185999 (+22 ms)Flushing 1588230740/table: appending metadata at 1733831186014 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733831186014Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41232ad6: reopening flushed file at 1733831186034 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3808598a: reopening flushed file at 1733831186045 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40b15022: reopening flushed file at 1733831186058 (+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 168ms, sequenceid=11, compaction requested=false at 1733831186073 (+15 ms)Writing region close event to WAL at 1733831186074 (+1 ms)Running coprocessor post-close hooks at 1733831186082 (+8 ms)Closed at 1733831186082 2024-12-10T11:46:26,082 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-10T11:46:26,104 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(976): stopping server ef751fafe6b1,43719,1733831182093; all regions closed. 2024-12-10T11:46:26,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_1073741829_1019 (size=2751) 2024-12-10T11:46:26,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_1073741829_1019 (size=2751) 2024-12-10T11:46:26,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741829_1019 (size=2751) 2024-12-10T11:46:26,112 DEBUG [RS:1;ef751fafe6b1:43719 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/oldWALs 2024-12-10T11:46:26,112 INFO [RS:1;ef751fafe6b1:43719 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef751fafe6b1%2C43719%2C1733831182093.meta:.meta(num 1733831184128) 2024-12-10T11:46:26,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_1073741827_1017 (size=1298) 2024-12-10T11:46:26,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741827_1017 (size=1298) 2024-12-10T11:46:26,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_1073741827_1017 (size=1298) 2024-12-10T11:46:26,119 DEBUG [RS:1;ef751fafe6b1:43719 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/oldWALs 2024-12-10T11:46:26,119 INFO [RS:1;ef751fafe6b1:43719 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef751fafe6b1%2C43719%2C1733831182093:(num 1733831183760) 2024-12-10T11:46:26,120 DEBUG [RS:1;ef751fafe6b1:43719 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:26,120 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:46:26,120 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T11:46:26,120 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.ChoreService(370): Chore 
service for: regionserver/ef751fafe6b1:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T11:46:26,120 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T11:46:26,120 INFO [regionserver/ef751fafe6b1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T11:46:26,120 INFO [RS:1;ef751fafe6b1:43719 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43719 2024-12-10T11:46:26,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T11:46:26,145 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef751fafe6b1,43719,1733831182093 2024-12-10T11:46:26,146 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T11:46:26,154 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef751fafe6b1,43719,1733831182093] 2024-12-10T11:46:26,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:26,154 INFO [RS:0;ef751fafe6b1:36793 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T11:46:26,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36793-0x1000f99ff790001, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:26,154 INFO [RS:0;ef751fafe6b1:36793 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef751fafe6b1,36793,1733831181979; zookeeper connection closed. 2024-12-10T11:46:26,155 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@11577032 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@11577032 2024-12-10T11:46:26,162 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef751fafe6b1,43719,1733831182093 already deleted, retry=false 2024-12-10T11:46:26,162 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef751fafe6b1,43719,1733831182093 expired; onlineServers=0 2024-12-10T11:46:26,163 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef751fafe6b1,40549,1733831181287' ***** 2024-12-10T11:46:26,163 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-10T11:46:26,163 INFO [M:0;ef751fafe6b1:40549 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:46:26,163 INFO [M:0;ef751fafe6b1:40549 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T11:46:26,163 DEBUG [M:0;ef751fafe6b1:40549 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-10T11:46:26,163 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-10T11:46:26,164 DEBUG [M:0;ef751fafe6b1:40549 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-10T11:46:26,164 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster-HFileCleaner.large.0-1733831183339 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef751fafe6b1:0:becomeActiveMaster-HFileCleaner.large.0-1733831183339,5,FailOnTimeoutGroup] 2024-12-10T11:46:26,164 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster-HFileCleaner.small.0-1733831183343 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef751fafe6b1:0:becomeActiveMaster-HFileCleaner.small.0-1733831183343,5,FailOnTimeoutGroup] 2024-12-10T11:46:26,164 INFO [M:0;ef751fafe6b1:40549 {}] hbase.ChoreService(370): Chore service for: master/ef751fafe6b1:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-10T11:46:26,164 INFO [M:0;ef751fafe6b1:40549 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T11:46:26,165 DEBUG [M:0;ef751fafe6b1:40549 {}] master.HMaster(1795): Stopping service threads 2024-12-10T11:46:26,165 INFO [M:0;ef751fafe6b1:40549 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-10T11:46:26,165 INFO [M:0;ef751fafe6b1:40549 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T11:46:26,166 INFO [M:0;ef751fafe6b1:40549 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-10T11:46:26,166 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-10T11:46:26,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-10T11:46:26,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:26,176 DEBUG [M:0;ef751fafe6b1:40549 {}] zookeeper.ZKUtil(347): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-10T11:46:26,176 WARN [M:0;ef751fafe6b1:40549 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-10T11:46:26,178 INFO [M:0;ef751fafe6b1:40549 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/.lastflushedseqids 2024-12-10T11:46:26,192 WARN [M:0;ef751fafe6b1:40549 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:26,192 WARN [M:0;ef751fafe6b1:40549 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-10T11:46:26,195 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-884346255_22 at /127.0.0.1:47070 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:33591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47070 dst: /127.0.0.1:33591 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:26,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-10T11:46:26,201 WARN [M:0;ef751fafe6b1:40549 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:46:26,201 INFO [M:0;ef751fafe6b1:40549 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-10T11:46:26,201 INFO [M:0;ef751fafe6b1:40549 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-10T11:46:26,201 DEBUG [M:0;ef751fafe6b1:40549 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T11:46:26,201 INFO [M:0;ef751fafe6b1:40549 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:46:26,201 DEBUG [M:0;ef751fafe6b1:40549 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:46:26,201 DEBUG [M:0;ef751fafe6b1:40549 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T11:46:26,201 DEBUG [M:0;ef751fafe6b1:40549 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-10T11:46:26,201 INFO [M:0;ef751fafe6b1:40549 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.85 KB heapSize=34.13 KB 2024-12-10T11:46:26,220 DEBUG [M:0;ef751fafe6b1:40549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/32bef8b14d3840fbb6c564ddacb7741a is 82, key is hbase:meta,,1/info:regioninfo/1733831184213/Put/seqid=0 2024-12-10T11:46:26,222 WARN [M:0;ef751fafe6b1:40549 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:26,223 WARN [M:0;ef751fafe6b1:40549 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:26,225 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-884346255_22 at /127.0.0.1:50240 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:43797:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50240 dst: /127.0.0.1:43797 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:26,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-10T11:46:26,230 WARN [M:0;ef751fafe6b1:40549 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T11:46:26,230 INFO [M:0;ef751fafe6b1:40549 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/32bef8b14d3840fbb6c564ddacb7741a 2024-12-10T11:46:26,252 DEBUG [M:0;ef751fafe6b1:40549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6784738ec28147acbbdfd0c952790eea is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733831184956/Put/seqid=0 2024-12-10T11:46:26,254 INFO [RS:1;ef751fafe6b1:43719 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T11:46:26,254 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:26,254 WARN [M:0;ef751fafe6b1:40549 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:26,254 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43719-0x1000f99ff790002, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:26,254 INFO [RS:1;ef751fafe6b1:43719 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef751fafe6b1,43719,1733831182093; zookeeper connection closed. 2024-12-10T11:46:26,254 WARN [M:0;ef751fafe6b1:40549 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:26,255 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@12a646ee {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@12a646ee 2024-12-10T11:46:26,255 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-10T11:46:26,257 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-884346255_22 at /127.0.0.1:50270 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:43797:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50270 dst: /127.0.0.1:43797 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:26,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_-9223372036854775552_1037 (size=6441) 2024-12-10T11:46:26,262 WARN [M:0;ef751fafe6b1:40549 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T11:46:26,262 INFO [M:0;ef751fafe6b1:40549 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.17 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6784738ec28147acbbdfd0c952790eea 2024-12-10T11:46:26,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-10T11:46:26,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-10T11:46:26,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-10T11:46:26,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-10T11:46:26,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-10T11:46:26,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-10T11:46:26,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-10T11:46:26,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-10T11:46:26,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-10T11:46:26,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-10T11:46:26,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-10T11:46:26,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_-9223372036854775741_1008 (size=1189) 
2024-12-10T11:46:26,296 DEBUG [M:0;ef751fafe6b1:40549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/624b3681a4544a979d392bb0fb0d04ce is 69, key is ef751fafe6b1,36793,1733831181979/rs:state/1733831183474/Put/seqid=0 2024-12-10T11:46:26,298 WARN [M:0;ef751fafe6b1:40549 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:26,298 WARN [M:0;ef751fafe6b1:40549 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T11:46:26,300 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-884346255_22 at /127.0.0.1:50336 [Receiving block BP-1050097718-172.17.0.2-1733831177438:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:43797:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50336 dst: /127.0.0.1:43797 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T11:46:26,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-10T11:46:26,304 WARN [M:0;ef751fafe6b1:40549 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T11:46:26,304 INFO [M:0;ef751fafe6b1:40549 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/624b3681a4544a979d392bb0fb0d04ce 2024-12-10T11:46:26,313 DEBUG [M:0;ef751fafe6b1:40549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/32bef8b14d3840fbb6c564ddacb7741a as hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/32bef8b14d3840fbb6c564ddacb7741a 2024-12-10T11:46:26,320 INFO [M:0;ef751fafe6b1:40549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/32bef8b14d3840fbb6c564ddacb7741a, entries=8, sequenceid=72, filesize=5.5 K 2024-12-10T11:46:26,321 DEBUG [M:0;ef751fafe6b1:40549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6784738ec28147acbbdfd0c952790eea as hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6784738ec28147acbbdfd0c952790eea 2024-12-10T11:46:26,330 INFO [M:0;ef751fafe6b1:40549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6784738ec28147acbbdfd0c952790eea, entries=8, sequenceid=72, filesize=6.3 K 2024-12-10T11:46:26,331 DEBUG [M:0;ef751fafe6b1:40549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/624b3681a4544a979d392bb0fb0d04ce as hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/624b3681a4544a979d392bb0fb0d04ce 2024-12-10T11:46:26,339 INFO [M:0;ef751fafe6b1:40549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/624b3681a4544a979d392bb0fb0d04ce, entries=3, sequenceid=72, filesize=5.2 K 2024-12-10T11:46:26,341 INFO [M:0;ef751fafe6b1:40549 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=72, compaction requested=false 2024-12-10T11:46:26,342 INFO [M:0;ef751fafe6b1:40549 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-10T11:46:26,342 DEBUG [M:0;ef751fafe6b1:40549 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733831186201Disabling compacts and flushes for region at 1733831186201Disabling writes for close at 1733831186201Obtaining lock to block concurrent updates at 1733831186201Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733831186201Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27492, getHeapSize=34888, getOffHeapSize=0, getCellsCount=85 at 1733831186202 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733831186203 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733831186203Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733831186219 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733831186219Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733831186238 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733831186252 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733831186252Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733831186270 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733831186296 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733831186296Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@127b425: reopening flushed file at 1733831186311 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@384eb1fd: reopening flushed file at 1733831186320 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@178ba908: reopening flushed file at 1733831186330 (+10 ms)Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=72, compaction requested=false at 1733831186341 (+11 ms)Writing region close event to WAL at 1733831186342 (+1 ms)Closed at 1733831186342 2024-12-10T11:46:26,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33591 is added to blk_1073741825_1011 (size=32695) 2024-12-10T11:46:26,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43797 is added to blk_1073741825_1011 (size=32695) 2024-12-10T11:46:26,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38893 is added to blk_1073741825_1011 (size=32695) 2024-12-10T11:46:26,346 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T11:46:26,346 INFO [M:0;ef751fafe6b1:40549 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-10T11:46:26,346 INFO [M:0;ef751fafe6b1:40549 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40549 2024-12-10T11:46:26,346 INFO [M:0;ef751fafe6b1:40549 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T11:46:26,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:26,467 INFO [M:0;ef751fafe6b1:40549 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T11:46:26,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1000f99ff790000, quorum=127.0.0.1:59713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:26,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:46:26,516 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T11:46:26,517 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T11:46:26,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T11:46:26,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/hadoop.log.dir/,STOPPED} 2024-12-10T11:46:26,520 WARN [BP-1050097718-172.17.0.2-1733831177438 heartbeating to localhost/127.0.0.1:45975 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-10T11:46:26,520 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-10T11:46:26,521 WARN [BP-1050097718-172.17.0.2-1733831177438 heartbeating to localhost/127.0.0.1:45975 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1050097718-172.17.0.2-1733831177438 (Datanode Uuid 56ba2a47-76fb-4fd2-b03e-8537a18173f8) service to localhost/127.0.0.1:45975 2024-12-10T11:46:26,521 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-10T11:46:26,522 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/data/data5/current/BP-1050097718-172.17.0.2-1733831177438 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T11:46:26,522 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/data/data6/current/BP-1050097718-172.17.0.2-1733831177438 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T11:46:26,522 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-10T11:46:26,524 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:46:26,524 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T11:46:26,524 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T11:46:26,524 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T11:46:26,524 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/hadoop.log.dir/,STOPPED} 2024-12-10T11:46:26,525 WARN [BP-1050097718-172.17.0.2-1733831177438 heartbeating to localhost/127.0.0.1:45975 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-10T11:46:26,525 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-10T11:46:26,525 WARN [BP-1050097718-172.17.0.2-1733831177438 heartbeating to localhost/127.0.0.1:45975 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1050097718-172.17.0.2-1733831177438 (Datanode Uuid fa319895-7728-4d59-b387-962e0eca2518) service to localhost/127.0.0.1:45975 2024-12-10T11:46:26,525 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-10T11:46:26,526 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/data/data3/current/BP-1050097718-172.17.0.2-1733831177438 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T11:46:26,526 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/data/data4/current/BP-1050097718-172.17.0.2-1733831177438 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T11:46:26,526 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-10T11:46:26,530 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:46:26,531 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T11:46:26,531 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T11:46:26,531 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T11:46:26,531 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/hadoop.log.dir/,STOPPED} 2024-12-10T11:46:26,532 WARN [BP-1050097718-172.17.0.2-1733831177438 heartbeating to localhost/127.0.0.1:45975 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-10T11:46:26,532 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-10T11:46:26,533 WARN [BP-1050097718-172.17.0.2-1733831177438 heartbeating to localhost/127.0.0.1:45975 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1050097718-172.17.0.2-1733831177438 (Datanode Uuid b09f0da5-dbd2-43d7-a8cf-d987533785f3) service to localhost/127.0.0.1:45975 2024-12-10T11:46:26,533 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-10T11:46:26,533 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/data/data1/current/BP-1050097718-172.17.0.2-1733831177438 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T11:46:26,533 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/cluster_4825df41-c912-d781-3feb-722196b52d31/data/data2/current/BP-1050097718-172.17.0.2-1733831177438 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T11:46:26,534 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-10T11:46:26,545 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-10T11:46:26,546 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T11:46:26,546 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T11:46:26,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T11:46:26,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/hadoop.log.dir/,STOPPED} 2024-12-10T11:46:26,554 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-10T11:46:26,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-10T11:46:26,588 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=86 (was 157), OpenFileDescriptor=439 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=174 (was 172) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7090 (was 7355) 2024-12-10T11:46:26,593 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=86, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=174, ProcessCount=11, AvailableMemoryMB=7090 2024-12-10T11:46:26,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-10T11:46:26,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/hadoop.log.dir so I do NOT create it in target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506 2024-12-10T11:46:26,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7aff932c-c629-6115-2de0-fdda12f4f96d/hadoop.tmp.dir so I do NOT create it in target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506 2024-12-10T11:46:26,593 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f, deleteOnExit=true 2024-12-10T11:46:26,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-10T11:46:26,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/test.cache.data in system properties and HBase conf 2024-12-10T11:46:26,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/hadoop.tmp.dir in system properties and HBase conf 2024-12-10T11:46:26,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/hadoop.log.dir in system properties and HBase conf 2024-12-10T11:46:26,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-10T11:46:26,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-10T11:46:26,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-10T11:46:26,594 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-10T11:46:26,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-10T11:46:26,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-10T11:46:26,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-10T11:46:26,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T11:46:26,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-10T11:46:26,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-10T11:46:26,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T11:46:26,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T11:46:26,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-10T11:46:26,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/nfs.dump.dir in system properties and HBase conf 2024-12-10T11:46:26,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/java.io.tmpdir in system properties and HBase conf 2024-12-10T11:46:26,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T11:46:26,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-10T11:46:26,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-10T11:46:26,848 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:46:26,853 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T11:46:26,854 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T11:46:26,854 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T11:46:26,854 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T11:46:26,855 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:46:26,855 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a49b909{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/hadoop.log.dir/,AVAILABLE} 2024-12-10T11:46:26,855 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56aa9d3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T11:46:26,943 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7aaeb6cf{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/java.io.tmpdir/jetty-localhost-40921-hadoop-hdfs-3_4_1-tests_jar-_-any-9454560191884105968/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-10T11:46:26,944 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@686c9dd5{HTTP/1.1, (http/1.1)}{localhost:40921} 2024-12-10T11:46:26,944 INFO [Time-limited test {}] server.Server(415): Started @11164ms 2024-12-10T11:46:27,169 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:46:27,172 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T11:46:27,172 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T11:46:27,172 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T11:46:27,173 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T11:46:27,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2807f8c2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/hadoop.log.dir/,AVAILABLE} 2024-12-10T11:46:27,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61a92fea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T11:46:27,263 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38e5384{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/java.io.tmpdir/jetty-localhost-40929-hadoop-hdfs-3_4_1-tests_jar-_-any-14760718627449959008/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:46:27,263 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@7d6118e0{HTTP/1.1, (http/1.1)}{localhost:40929} 2024-12-10T11:46:27,263 INFO [Time-limited test {}] server.Server(415): Started @11483ms 2024-12-10T11:46:27,264 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T11:46:27,296 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:46:27,300 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T11:46:27,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T11:46:27,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T11:46:27,301 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T11:46:27,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b4297c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/hadoop.log.dir/,AVAILABLE} 2024-12-10T11:46:27,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bb1336{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T11:46:27,392 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6e5e4927{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/java.io.tmpdir/jetty-localhost-37143-hadoop-hdfs-3_4_1-tests_jar-_-any-2281492334461819562/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:46:27,393 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1768a8c1{HTTP/1.1, (http/1.1)}{localhost:37143} 2024-12-10T11:46:27,393 INFO [Time-limited test {}] server.Server(415): Started @11613ms 2024-12-10T11:46:27,394 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T11:46:27,428 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T11:46:27,431 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T11:46:27,432 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T11:46:27,432 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T11:46:27,432 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T11:46:27,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e0095f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/hadoop.log.dir/,AVAILABLE} 2024-12-10T11:46:27,433 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38da8210{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T11:46:27,521 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@bff0a43{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/java.io.tmpdir/jetty-localhost-45261-hadoop-hdfs-3_4_1-tests_jar-_-any-16253100446966961542/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T11:46:27,522 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@19dff04d{HTTP/1.1, (http/1.1)}{localhost:45261} 2024-12-10T11:46:27,522 INFO [Time-limited test {}] server.Server(415): Started @11742ms 2024-12-10T11:46:27,523 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T11:46:28,068 WARN [Thread-569 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/data/data1/current/BP-21781453-172.17.0.2-1733831186620/current, will proceed with Du for space computation calculation, 2024-12-10T11:46:28,071 WARN [Thread-570 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/data/data2/current/BP-21781453-172.17.0.2-1733831186620/current, will proceed with Du for space computation calculation, 2024-12-10T11:46:28,086 WARN [Thread-511 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-10T11:46:28,088 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x254300a24da75ed7 with lease ID 0xca5e5233a758dbac: Processing first storage report for DS-f7b01ebd-9220-40bf-a3dc-f171b1eb54d3 from datanode DatanodeRegistration(127.0.0.1:43989, datanodeUuid=ffcc43a6-daf2-4943-8178-3452b18ebd17, infoPort=40041, infoSecurePort=0, ipcPort=41201, storageInfo=lv=-57;cid=testClusterID;nsid=1336610144;c=1733831186620) 2024-12-10T11:46:28,088 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x254300a24da75ed7 with lease ID 0xca5e5233a758dbac: from storage DS-f7b01ebd-9220-40bf-a3dc-f171b1eb54d3 node DatanodeRegistration(127.0.0.1:43989, datanodeUuid=ffcc43a6-daf2-4943-8178-3452b18ebd17, infoPort=40041, infoSecurePort=0, ipcPort=41201, storageInfo=lv=-57;cid=testClusterID;nsid=1336610144;c=1733831186620), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:46:28,089 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x254300a24da75ed7 with lease ID 0xca5e5233a758dbac: Processing first storage report for DS-2718aa97-9063-4fae-a771-f96f69ae0a34 from datanode DatanodeRegistration(127.0.0.1:43989, datanodeUuid=ffcc43a6-daf2-4943-8178-3452b18ebd17, infoPort=40041, infoSecurePort=0, ipcPort=41201, storageInfo=lv=-57;cid=testClusterID;nsid=1336610144;c=1733831186620) 2024-12-10T11:46:28,089 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x254300a24da75ed7 with lease ID 0xca5e5233a758dbac: from storage DS-2718aa97-9063-4fae-a771-f96f69ae0a34 node DatanodeRegistration(127.0.0.1:43989, datanodeUuid=ffcc43a6-daf2-4943-8178-3452b18ebd17, infoPort=40041, infoSecurePort=0, ipcPort=41201, storageInfo=lv=-57;cid=testClusterID;nsid=1336610144;c=1733831186620), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-10T11:46:28,367 WARN [Thread-582 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/data/data3/current/BP-21781453-172.17.0.2-1733831186620/current, will proceed with Du for space computation calculation, 2024-12-10T11:46:28,367 WARN [Thread-583 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/data/data4/current/BP-21781453-172.17.0.2-1733831186620/current, will proceed with Du for space computation calculation, 2024-12-10T11:46:28,387 WARN [Thread-534 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-10T11:46:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb003936bdc41a611 with lease ID 0xca5e5233a758dbad: Processing first storage report for DS-2ee22d16-cd10-432e-984b-f9b6b87aa386 from datanode DatanodeRegistration(127.0.0.1:34141, datanodeUuid=cd33c8e5-3924-48e2-87e7-991dc27a2de4, infoPort=36023, infoSecurePort=0, ipcPort=37697, storageInfo=lv=-57;cid=testClusterID;nsid=1336610144;c=1733831186620) 2024-12-10T11:46:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb003936bdc41a611 with lease ID 0xca5e5233a758dbad: from storage DS-2ee22d16-cd10-432e-984b-f9b6b87aa386 node DatanodeRegistration(127.0.0.1:34141, datanodeUuid=cd33c8e5-3924-48e2-87e7-991dc27a2de4, infoPort=36023, infoSecurePort=0, ipcPort=37697, storageInfo=lv=-57;cid=testClusterID;nsid=1336610144;c=1733831186620), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:46:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb003936bdc41a611 with lease ID 0xca5e5233a758dbad: Processing first storage report for DS-7ca2bf78-8f46-4442-8ff3-0ee479b5200e from datanode DatanodeRegistration(127.0.0.1:34141, datanodeUuid=cd33c8e5-3924-48e2-87e7-991dc27a2de4, infoPort=36023, infoSecurePort=0, ipcPort=37697, storageInfo=lv=-57;cid=testClusterID;nsid=1336610144;c=1733831186620) 2024-12-10T11:46:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb003936bdc41a611 with lease ID 0xca5e5233a758dbad: from storage DS-7ca2bf78-8f46-4442-8ff3-0ee479b5200e node DatanodeRegistration(127.0.0.1:34141, datanodeUuid=cd33c8e5-3924-48e2-87e7-991dc27a2de4, infoPort=36023, infoSecurePort=0, ipcPort=37697, storageInfo=lv=-57;cid=testClusterID;nsid=1336610144;c=1733831186620), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:46:28,467 WARN [Thread-593 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/data/data5/current/BP-21781453-172.17.0.2-1733831186620/current, will proceed with Du for space computation calculation, 2024-12-10T11:46:28,467 WARN [Thread-594 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/data/data6/current/BP-21781453-172.17.0.2-1733831186620/current, will proceed with Du for space computation calculation, 2024-12-10T11:46:28,483 WARN [Thread-557 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-10T11:46:28,485 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x871532469f234f5c with lease ID 0xca5e5233a758dbae: Processing first storage report for DS-f978e190-464c-4f35-92b7-ed886755680e from datanode DatanodeRegistration(127.0.0.1:38591, datanodeUuid=eb0cb4bc-7b70-4d7c-b175-c81d69ffe028, infoPort=39175, infoSecurePort=0, ipcPort=39697, storageInfo=lv=-57;cid=testClusterID;nsid=1336610144;c=1733831186620) 2024-12-10T11:46:28,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x871532469f234f5c with lease ID 0xca5e5233a758dbae: from storage DS-f978e190-464c-4f35-92b7-ed886755680e node DatanodeRegistration(127.0.0.1:38591, datanodeUuid=eb0cb4bc-7b70-4d7c-b175-c81d69ffe028, infoPort=39175, infoSecurePort=0, ipcPort=39697, storageInfo=lv=-57;cid=testClusterID;nsid=1336610144;c=1733831186620), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:46:28,485 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x871532469f234f5c with lease ID 0xca5e5233a758dbae: Processing first storage report for DS-4096e284-f947-4019-94c6-bc89f30a7419 from datanode DatanodeRegistration(127.0.0.1:38591, datanodeUuid=eb0cb4bc-7b70-4d7c-b175-c81d69ffe028, infoPort=39175, infoSecurePort=0, ipcPort=39697, storageInfo=lv=-57;cid=testClusterID;nsid=1336610144;c=1733831186620) 2024-12-10T11:46:28,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x871532469f234f5c with lease ID 0xca5e5233a758dbae: from storage DS-4096e284-f947-4019-94c6-bc89f30a7419 node DatanodeRegistration(127.0.0.1:38591, datanodeUuid=eb0cb4bc-7b70-4d7c-b175-c81d69ffe028, infoPort=39175, infoSecurePort=0, ipcPort=39697, storageInfo=lv=-57;cid=testClusterID;nsid=1336610144;c=1733831186620), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T11:46:28,563 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506 2024-12-10T11:46:28,582 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/zookeeper_0, clientPort=59194, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-10T11:46:28,583 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59194 2024-12-10T11:46:28,584 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:28,586 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:28,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741825_1001 (size=7) 2024-12-10T11:46:28,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741825_1001 (size=7) 2024-12-10T11:46:28,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741825_1001 (size=7) 2024-12-10T11:46:28,606 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6 with version=8 2024-12-10T11:46:28,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45975/user/jenkins/test-data/f70f2c4e-74b6-0388-0390-35db2b15c52d/hbase-staging 2024-12-10T11:46:28,608 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef751fafe6b1:0 server-side Connection retries=45 2024-12-10T11:46:28,609 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:28,609 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:28,609 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T11:46:28,609 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:28,609 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T11:46:28,609 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-10T11:46:28,609 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T11:46:28,610 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40575 2024-12-10T11:46:28,611 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40575 connecting to ZooKeeper ensemble=127.0.0.1:59194 2024-12-10T11:46:28,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:405750x0, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T11:46:28,655 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40575-0x1000f9a1f020000 connected 2024-12-10T11:46:28,746 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:28,751 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:28,755 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:28,756 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6, hbase.cluster.distributed=false 2024-12-10T11:46:28,757 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:46:28,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40575 2024-12-10T11:46:28,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40575 2024-12-10T11:46:28,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40575 2024-12-10T11:46:28,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40575 2024-12-10T11:46:28,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40575 2024-12-10T11:46:28,771 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef751fafe6b1:0 server-side Connection retries=45 2024-12-10T11:46:28,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:28,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:28,771 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T11:46:28,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:28,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T11:46:28,771 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T11:46:28,771 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T11:46:28,772 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41813 2024-12-10T11:46:28,773 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41813 connecting to ZooKeeper ensemble=127.0.0.1:59194 2024-12-10T11:46:28,774 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:28,775 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:28,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:418130x0, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T11:46:28,787 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41813-0x1000f9a1f020001 connected 2024-12-10T11:46:28,787 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:28,788 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T11:46:28,788 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T11:46:28,789 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T11:46:28,791 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:46:28,791 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41813 2024-12-10T11:46:28,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41813 2024-12-10T11:46:28,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41813 2024-12-10T11:46:28,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41813 2024-12-10T11:46:28,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41813 2024-12-10T11:46:28,810 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef751fafe6b1:0 server-side Connection retries=45 2024-12-10T11:46:28,810 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:28,810 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:28,810 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T11:46:28,810 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:28,810 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T11:46:28,810 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T11:46:28,810 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T11:46:28,811 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42797 2024-12-10T11:46:28,812 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42797 connecting to ZooKeeper ensemble=127.0.0.1:59194 2024-12-10T11:46:28,812 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:28,813 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:28,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:427970x0, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T11:46:28,825 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42797-0x1000f9a1f020002 connected 2024-12-10T11:46:28,825 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:28,826 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T11:46:28,826 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T11:46:28,827 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T11:46:28,828 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:46:28,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42797 2024-12-10T11:46:28,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42797 2024-12-10T11:46:28,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42797 2024-12-10T11:46:28,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42797 2024-12-10T11:46:28,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42797 2024-12-10T11:46:28,843 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef751fafe6b1:0 server-side Connection retries=45 2024-12-10T11:46:28,843 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:28,843 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:28,843 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T11:46:28,843 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T11:46:28,843 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T11:46:28,843 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T11:46:28,843 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T11:46:28,844 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39397 2024-12-10T11:46:28,845 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39397 connecting to ZooKeeper ensemble=127.0.0.1:59194 2024-12-10T11:46:28,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:28,847 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:28,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:393970x0, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T11:46:28,858 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:393970x0, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:28,858 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39397-0x1000f9a1f020003 connected 2024-12-10T11:46:28,859 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T11:46:28,859 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T11:46:28,860 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T11:46:28,861 DEBUG [Time-limited 
test {}] zookeeper.ZKUtil(113): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T11:46:28,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39397 2024-12-10T11:46:28,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39397 2024-12-10T11:46:28,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39397 2024-12-10T11:46:28,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39397 2024-12-10T11:46:28,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39397 2024-12-10T11:46:28,875 DEBUG [M:0;ef751fafe6b1:40575 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef751fafe6b1:40575 2024-12-10T11:46:28,875 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef751fafe6b1,40575,1733831188608 2024-12-10T11:46:28,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:28,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:28,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:28,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:28,887 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef751fafe6b1,40575,1733831188608 2024-12-10T11:46:28,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T11:46:28,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T11:46:28,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:28,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T11:46:28,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:28,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:28,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:28,896 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-10T11:46:28,897 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef751fafe6b1,40575,1733831188608 from backup master directory 2024-12-10T11:46:28,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:28,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef751fafe6b1,40575,1733831188608 2024-12-10T11:46:28,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:28,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:28,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T11:46:28,908 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
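The repeated "Set watcher on znode that does not yet exist" lines and the NodeCreated / NodeChildrenChanged events above reflect a standard ZooKeeper pattern: calling exists() with a watch registers interest in a path even while it is absent, so the client is notified once /hbase/master or /hbase/running is created. A minimal sketch of that pattern with the plain ZooKeeper client API follows; the ensemble address 127.0.0.1:59194 and the znode path are taken from the log, while the session timeout and the wrapper class are illustrative assumptions, and this is not HBase's ZKUtil implementation.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ExistsWatchSketch {
      public static void main(String[] args) throws Exception {
        // Watcher fires later for events such as NodeCreated on the watched path.
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event " + event.getType() + " on " + event.getPath());
        // Ensemble address as reported by the log; the timeout value is illustrative.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59194", 40000, watcher);
        // exists() returns null while the znode is missing, but the watch is still
        // registered, so a later NodeCreated event for /hbase/master will be delivered.
        System.out.println("exists: " + zk.exists("/hbase/master", true));
      }
    }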
2024-12-10T11:46:28,908 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef751fafe6b1,40575,1733831188608 2024-12-10T11:46:28,915 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/hbase.id] with ID: 8b1c8a55-ebc6-4962-bb92-423921e74732 2024-12-10T11:46:28,915 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/.tmp/hbase.id 2024-12-10T11:46:28,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741826_1002 (size=42) 2024-12-10T11:46:28,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741826_1002 (size=42) 2024-12-10T11:46:28,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741826_1002 (size=42) 2024-12-10T11:46:28,924 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/.tmp/hbase.id]:[hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/hbase.id] 2024-12-10T11:46:28,938 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T11:46:28,938 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-10T11:46:28,940 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
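The cluster ID sequence above (write hbase.id to a .tmp location, then move it to its final path) is the usual write-then-rename idiom for publishing a file on HDFS so readers never see a partially written copy. A hedged sketch of that idiom with the stock Hadoop FileSystem API follows; it is a generic illustration, not HBase's FSUtils code, and the helper name and staging path are assumptions.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      /** Stage the content under .tmp, then rename it into place. */
      public static void writeAtomically(FileSystem fs, Path target, String clusterId)
          throws IOException {
        Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8)); // write the staged copy
        }
        if (!fs.rename(tmp, target)) { // rename publishes the finished file in one step
          throw new IOException("rename failed: " + tmp + " -> " + target);
        }
      }
    }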
2024-12-10T11:46:28,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:28,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:28,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:28,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:28,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741827_1003 (size=196) 2024-12-10T11:46:28,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741827_1003 (size=196) 2024-12-10T11:46:28,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741827_1003 (size=196) 2024-12-10T11:46:28,960 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T11:46:28,960 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-10T11:46:28,961 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T11:46:28,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is 
added to blk_1073741828_1004 (size=1189) 2024-12-10T11:46:28,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741828_1004 (size=1189) 2024-12-10T11:46:28,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741828_1004 (size=1189) 2024-12-10T11:46:28,973 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store 2024-12-10T11:46:28,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741829_1005 (size=34) 2024-12-10T11:46:28,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741829_1005 (size=34) 2024-12-10T11:46:28,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741829_1005 (size=34) 2024-12-10T11:46:28,985 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:46:28,985 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T11:46:28,985 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:46:28,985 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-10T11:46:28,985 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T11:46:28,985 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:46:28,986 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:46:28,986 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733831188985Disabling compacts and flushes for region at 1733831188985Disabling writes for close at 1733831188985Writing region close event to WAL at 1733831188986 (+1 ms)Closed at 1733831188986 2024-12-10T11:46:28,986 WARN [master/ef751fafe6b1:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/.initializing 2024-12-10T11:46:28,987 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/WALs/ef751fafe6b1,40575,1733831188608 2024-12-10T11:46:28,990 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef751fafe6b1%2C40575%2C1733831188608, suffix=, logDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/WALs/ef751fafe6b1,40575,1733831188608, archiveDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/oldWALs, maxLogs=10 2024-12-10T11:46:28,991 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef751fafe6b1%2C40575%2C1733831188608.1733831188990 2024-12-10T11:46:29,000 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/WALs/ef751fafe6b1,40575,1733831188608/ef751fafe6b1%2C40575%2C1733831188608.1733831188990 2024-12-10T11:46:29,002 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39175:39175),(127.0.0.1/127.0.0.1:36023:36023),(127.0.0.1/127.0.0.1:40041:40041)] 2024-12-10T11:46:29,003 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-10T11:46:29,003 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:46:29,003 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:29,003 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:29,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:29,007 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-10T11:46:29,007 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:29,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:29,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:29,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-10T11:46:29,010 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:29,010 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:46:29,010 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:29,013 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-10T11:46:29,013 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:29,014 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:46:29,014 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:29,016 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-10T11:46:29,017 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:29,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:46:29,018 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:29,018 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:29,019 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:29,021 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:29,021 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:29,021 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T11:46:29,023 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T11:46:29,026 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T11:46:29,026 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62100941, jitterRate=-0.07462386786937714}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T11:46:29,027 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733831189003Initializing all the Stores at 1733831189004 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831189005 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733831189005Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733831189005Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733831189005Cleaning up temporary data from old regions at 1733831189021 (+16 ms)Region opened successfully at 1733831189027 (+6 ms) 2024-12-10T11:46:29,027 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-10T11:46:29,032 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cf27080, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef751fafe6b1/172.17.0.2:0 2024-12-10T11:46:29,033 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-10T11:46:29,033 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-10T11:46:29,034 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-10T11:46:29,034 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-10T11:46:29,034 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-10T11:46:29,035 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-10T11:46:29,035 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-10T11:46:29,038 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-10T11:46:29,039 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-10T11:46:29,062 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-10T11:46:29,062 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-10T11:46:29,063 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-10T11:46:29,074 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-10T11:46:29,075 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-10T11:46:29,076 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-10T11:46:29,087 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-10T11:46:29,089 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-10T11:46:29,100 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-10T11:46:29,105 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-10T11:46:29,116 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-10T11:46:29,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:29,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:29,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:29,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-10T11:46:29,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,132 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef751fafe6b1,40575,1733831188608, sessionid=0x1000f9a1f020000, setting cluster-up flag (Was=false) 2024-12-10T11:46:29,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,175 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-10T11:46:29,176 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef751fafe6b1,40575,1733831188608 2024-12-10T11:46:29,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,224 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-10T11:46:29,226 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef751fafe6b1,40575,1733831188608 2024-12-10T11:46:29,228 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-10T11:46:29,230 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-10T11:46:29,231 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-10T11:46:29,231 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-10T11:46:29,231 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef751fafe6b1,40575,1733831188608 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-10T11:46:29,233 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef751fafe6b1:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:46:29,233 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef751fafe6b1:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:46:29,233 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef751fafe6b1:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:46:29,233 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef751fafe6b1:0, corePoolSize=5, maxPoolSize=5 2024-12-10T11:46:29,233 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef751fafe6b1:0, corePoolSize=10, maxPoolSize=10 2024-12-10T11:46:29,233 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,233 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef751fafe6b1:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:46:29,233 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,234 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733831219234 2024-12-10T11:46:29,234 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-10T11:46:29,234 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-10T11:46:29,234 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-10T11:46:29,234 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-10T11:46:29,234 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-10T11:46:29,234 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-10T11:46:29,234 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,235 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-10T11:46:29,235 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-10T11:46:29,235 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-10T11:46:29,235 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-10T11:46:29,235 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-10T11:46:29,235 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T11:46:29,235 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-10T11:46:29,235 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef751fafe6b1:0:becomeActiveMaster-HFileCleaner.large.0-1733831189235,5,FailOnTimeoutGroup] 2024-12-10T11:46:29,236 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef751fafe6b1:0:becomeActiveMaster-HFileCleaner.small.0-1733831189235,5,FailOnTimeoutGroup] 2024-12-10T11:46:29,236 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,236 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-10T11:46:29,236 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,236 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-10T11:46:29,237 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:29,237 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T11:46:29,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741831_1007 (size=1321) 2024-12-10T11:46:29,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741831_1007 (size=1321) 2024-12-10T11:46:29,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741831_1007 (size=1321) 2024-12-10T11:46:29,248 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-10T11:46:29,248 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6 2024-12-10T11:46:29,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741832_1008 (size=32) 2024-12-10T11:46:29,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741832_1008 (size=32) 2024-12-10T11:46:29,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741832_1008 (size=32) 2024-12-10T11:46:29,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:46:29,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T11:46:29,264 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.HRegionServer(746): ClusterId : 8b1c8a55-ebc6-4962-bb92-423921e74732 2024-12-10T11:46:29,264 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.HRegionServer(746): ClusterId : 8b1c8a55-ebc6-4962-bb92-423921e74732 2024-12-10T11:46:29,264 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(746): ClusterId : 8b1c8a55-ebc6-4962-bb92-423921e74732 2024-12-10T11:46:29,264 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T11:46:29,264 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T11:46:29,264 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T11:46:29,264 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 
2024-12-10T11:46:29,264 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:29,265 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:29,265 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T11:46:29,267 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T11:46:29,267 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:29,267 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:29,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T11:46:29,269 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T11:46:29,269 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:29,270 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:29,270 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T11:46:29,271 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T11:46:29,271 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:29,272 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:29,272 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T11:46:29,273 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740 2024-12-10T11:46:29,274 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740 2024-12-10T11:46:29,275 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T11:46:29,275 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T11:46:29,275 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T11:46:29,275 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T11:46:29,275 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T11:46:29,276 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T11:46:29,276 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T11:46:29,276 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T11:46:29,276 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-10T11:46:29,278 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T11:46:29,281 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T11:46:29,281 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64860399, jitterRate=-0.03350473940372467}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T11:46:29,282 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733831189261Initializing all the Stores at 1733831189262 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831189262Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831189262Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733831189262Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831189262Cleaning up temporary data from old regions at 1733831189276 (+14 ms)Region opened successfully at 1733831189282 (+6 ms) 2024-12-10T11:46:29,283 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T11:46:29,283 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T11:46:29,283 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T11:46:29,283 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T11:46:29,283 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T11:46:29,283 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T11:46:29,283 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733831189283Disabling compacts and flushes for region at 1733831189283Disabling writes for close at 1733831189283Writing 
region close event to WAL at 1733831189283Closed at 1733831189283 2024-12-10T11:46:29,285 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T11:46:29,285 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-10T11:46:29,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-10T11:46:29,287 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T11:46:29,288 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-10T11:46:29,296 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T11:46:29,296 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T11:46:29,296 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T11:46:29,296 DEBUG [RS:0;ef751fafe6b1:41813 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cc8dac5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef751fafe6b1/172.17.0.2:0 2024-12-10T11:46:29,296 DEBUG [RS:2;ef751fafe6b1:39397 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76a03f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef751fafe6b1/172.17.0.2:0 2024-12-10T11:46:29,296 DEBUG [RS:1;ef751fafe6b1:42797 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52f34b05, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef751fafe6b1/172.17.0.2:0 2024-12-10T11:46:29,311 DEBUG [RS:0;ef751fafe6b1:41813 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef751fafe6b1:41813 2024-12-10T11:46:29,311 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T11:46:29,311 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T11:46:29,311 DEBUG [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-10T11:46:29,311 DEBUG [RS:2;ef751fafe6b1:39397 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;ef751fafe6b1:39397 2024-12-10T11:46:29,312 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T11:46:29,312 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T11:46:29,312 DEBUG [RS:2;ef751fafe6b1:39397 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T11:46:29,312 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef751fafe6b1,40575,1733831188608 with port=41813, startcode=1733831188771 2024-12-10T11:46:29,312 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef751fafe6b1,40575,1733831188608 with port=39397, startcode=1733831188842 2024-12-10T11:46:29,313 DEBUG [RS:2;ef751fafe6b1:39397 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T11:46:29,313 DEBUG [RS:0;ef751fafe6b1:41813 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T11:46:29,313 DEBUG [RS:1;ef751fafe6b1:42797 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;ef751fafe6b1:42797 2024-12-10T11:46:29,313 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T11:46:29,313 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T11:46:29,313 DEBUG [RS:1;ef751fafe6b1:42797 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-10T11:46:29,314 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef751fafe6b1,40575,1733831188608 with port=42797, startcode=1733831188809 2024-12-10T11:46:29,314 DEBUG [RS:1;ef751fafe6b1:42797 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T11:46:29,315 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36735, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T11:46:29,315 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43345, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T11:46:29,315 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58577, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T11:46:29,316 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40575 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef751fafe6b1,41813,1733831188771 2024-12-10T11:46:29,316 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40575 {}] master.ServerManager(517): Registering regionserver=ef751fafe6b1,41813,1733831188771 2024-12-10T11:46:29,318 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40575 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef751fafe6b1,39397,1733831188842 2024-12-10T11:46:29,318 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40575 {}] master.ServerManager(517): Registering regionserver=ef751fafe6b1,39397,1733831188842 2024-12-10T11:46:29,318 DEBUG [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6 2024-12-10T11:46:29,318 DEBUG [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37145 2024-12-10T11:46:29,318 DEBUG [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T11:46:29,320 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40575 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef751fafe6b1,42797,1733831188809 2024-12-10T11:46:29,320 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40575 {}] master.ServerManager(517): Registering regionserver=ef751fafe6b1,42797,1733831188809 2024-12-10T11:46:29,320 DEBUG [RS:2;ef751fafe6b1:39397 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6 2024-12-10T11:46:29,320 DEBUG [RS:2;ef751fafe6b1:39397 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37145 2024-12-10T11:46:29,320 DEBUG [RS:2;ef751fafe6b1:39397 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T11:46:29,322 DEBUG [RS:1;ef751fafe6b1:42797 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6 2024-12-10T11:46:29,322 DEBUG [RS:1;ef751fafe6b1:42797 {}] regionserver.HRegionServer(1440): Config from master: 
fs.defaultFS=hdfs://localhost:37145 2024-12-10T11:46:29,322 DEBUG [RS:1;ef751fafe6b1:42797 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T11:46:29,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T11:46:29,360 DEBUG [RS:0;ef751fafe6b1:41813 {}] zookeeper.ZKUtil(111): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef751fafe6b1,41813,1733831188771 2024-12-10T11:46:29,360 WARN [RS:0;ef751fafe6b1:41813 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T11:46:29,360 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef751fafe6b1,39397,1733831188842] 2024-12-10T11:46:29,360 INFO [RS:0;ef751fafe6b1:41813 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T11:46:29,360 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef751fafe6b1,41813,1733831188771] 2024-12-10T11:46:29,360 DEBUG [RS:1;ef751fafe6b1:42797 {}] zookeeper.ZKUtil(111): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef751fafe6b1,42797,1733831188809 2024-12-10T11:46:29,360 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef751fafe6b1,42797,1733831188809] 2024-12-10T11:46:29,360 DEBUG [RS:2;ef751fafe6b1:39397 {}] zookeeper.ZKUtil(111): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef751fafe6b1,39397,1733831188842 2024-12-10T11:46:29,360 WARN [RS:1;ef751fafe6b1:42797 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T11:46:29,360 DEBUG [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/WALs/ef751fafe6b1,41813,1733831188771 2024-12-10T11:46:29,360 WARN [RS:2;ef751fafe6b1:39397 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-10T11:46:29,360 INFO [RS:1;ef751fafe6b1:42797 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T11:46:29,360 INFO [RS:2;ef751fafe6b1:39397 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T11:46:29,360 DEBUG [RS:1;ef751fafe6b1:42797 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/WALs/ef751fafe6b1,42797,1733831188809 2024-12-10T11:46:29,360 DEBUG [RS:2;ef751fafe6b1:39397 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/WALs/ef751fafe6b1,39397,1733831188842 2024-12-10T11:46:29,364 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T11:46:29,364 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T11:46:29,364 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T11:46:29,366 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T11:46:29,367 INFO [RS:1;ef751fafe6b1:42797 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T11:46:29,367 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,368 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T11:46:29,368 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T11:46:29,369 INFO [RS:2;ef751fafe6b1:39397 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T11:46:29,369 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,369 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T11:46:29,369 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef751fafe6b1:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef751fafe6b1:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:46:29,370 DEBUG [RS:1;ef751fafe6b1:42797 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:46:29,371 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T11:46:29,371 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T11:46:29,372 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T11:46:29,372 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-10T11:46:29,372 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,372 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,373 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,373 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,373 INFO [RS:0;ef751fafe6b1:41813 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T11:46:29,373 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,373 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,373 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef751fafe6b1:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:46:29,373 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,373 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,373 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,373 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,373 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,373 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,373 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef751fafe6b1:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:46:29,373 DEBUG [RS:2;ef751fafe6b1:39397 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:46:29,375 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-10T11:46:29,375 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T11:46:29,375 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,375 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,375 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,375 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,375 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,42797,1733831188809-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:46:29,375 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,375 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,375 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,375 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,375 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,375 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,39397,1733831188842-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:46:29,376 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T11:46:29,376 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-10T11:46:29,376 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,377 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,377 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,377 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,377 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,377 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef751fafe6b1:0, corePoolSize=2, maxPoolSize=2 2024-12-10T11:46:29,377 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,377 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,377 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,377 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,378 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,378 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef751fafe6b1:0, corePoolSize=1, maxPoolSize=1 2024-12-10T11:46:29,378 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef751fafe6b1:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:46:29,378 DEBUG [RS:0;ef751fafe6b1:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0, corePoolSize=3, maxPoolSize=3 2024-12-10T11:46:29,380 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,380 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,380 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,381 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-10T11:46:29,381 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,381 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,41813,1733831188771-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:46:29,394 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T11:46:29,394 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T11:46:29,394 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,39397,1733831188842-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,394 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,42797,1733831188809-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,394 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,394 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,394 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.Replication(171): ef751fafe6b1,42797,1733831188809 started 2024-12-10T11:46:29,394 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.Replication(171): ef751fafe6b1,39397,1733831188842 started 2024-12-10T11:46:29,400 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T11:46:29,401 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,41813,1733831188771-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,401 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,401 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.Replication(171): ef751fafe6b1,41813,1733831188771 started 2024-12-10T11:46:29,413 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,413 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-10T11:46:29,413 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.HRegionServer(1482): Serving as ef751fafe6b1,42797,1733831188809, RpcServer on ef751fafe6b1/172.17.0.2:42797, sessionid=0x1000f9a1f020002 2024-12-10T11:46:29,413 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.HRegionServer(1482): Serving as ef751fafe6b1,39397,1733831188842, RpcServer on ef751fafe6b1/172.17.0.2:39397, sessionid=0x1000f9a1f020003 2024-12-10T11:46:29,413 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T11:46:29,413 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T11:46:29,414 DEBUG [RS:2;ef751fafe6b1:39397 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef751fafe6b1,39397,1733831188842 2024-12-10T11:46:29,414 DEBUG [RS:1;ef751fafe6b1:42797 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef751fafe6b1,42797,1733831188809 2024-12-10T11:46:29,414 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef751fafe6b1,39397,1733831188842' 2024-12-10T11:46:29,414 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef751fafe6b1,42797,1733831188809' 2024-12-10T11:46:29,414 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T11:46:29,414 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T11:46:29,414 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T11:46:29,414 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T11:46:29,415 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T11:46:29,415 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T11:46:29,415 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T11:46:29,415 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T11:46:29,415 DEBUG [RS:2;ef751fafe6b1:39397 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef751fafe6b1,39397,1733831188842 2024-12-10T11:46:29,415 DEBUG [RS:1;ef751fafe6b1:42797 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef751fafe6b1,42797,1733831188809 2024-12-10T11:46:29,415 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef751fafe6b1,39397,1733831188842' 2024-12-10T11:46:29,415 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef751fafe6b1,42797,1733831188809' 2024-12-10T11:46:29,415 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T11:46:29,415 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T11:46:29,416 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T11:46:29,416 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T11:46:29,416 DEBUG [RS:2;ef751fafe6b1:39397 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T11:46:29,416 DEBUG [RS:1;ef751fafe6b1:42797 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T11:46:29,416 INFO [RS:2;ef751fafe6b1:39397 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T11:46:29,416 INFO [RS:1;ef751fafe6b1:42797 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T11:46:29,416 INFO [RS:1;ef751fafe6b1:42797 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T11:46:29,416 INFO [RS:2;ef751fafe6b1:39397 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T11:46:29,419 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,419 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(1482): Serving as ef751fafe6b1,41813,1733831188771, RpcServer on ef751fafe6b1/172.17.0.2:41813, sessionid=0x1000f9a1f020001 2024-12-10T11:46:29,419 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T11:46:29,419 DEBUG [RS:0;ef751fafe6b1:41813 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef751fafe6b1,41813,1733831188771 2024-12-10T11:46:29,419 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef751fafe6b1,41813,1733831188771' 2024-12-10T11:46:29,419 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T11:46:29,420 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T11:46:29,421 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T11:46:29,421 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T11:46:29,421 DEBUG [RS:0;ef751fafe6b1:41813 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef751fafe6b1,41813,1733831188771 2024-12-10T11:46:29,421 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef751fafe6b1,41813,1733831188771' 2024-12-10T11:46:29,421 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T11:46:29,422 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T11:46:29,422 DEBUG [RS:0;ef751fafe6b1:41813 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 
2024-12-10T11:46:29,422 INFO [RS:0;ef751fafe6b1:41813 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T11:46:29,422 INFO [RS:0;ef751fafe6b1:41813 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T11:46:29,439 WARN [ef751fafe6b1:40575 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-10T11:46:29,520 INFO [RS:2;ef751fafe6b1:39397 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef751fafe6b1%2C39397%2C1733831188842, suffix=, logDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/WALs/ef751fafe6b1,39397,1733831188842, archiveDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/oldWALs, maxLogs=32 2024-12-10T11:46:29,520 INFO [RS:1;ef751fafe6b1:42797 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef751fafe6b1%2C42797%2C1733831188809, suffix=, logDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/WALs/ef751fafe6b1,42797,1733831188809, archiveDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/oldWALs, maxLogs=32 2024-12-10T11:46:29,523 INFO [RS:2;ef751fafe6b1:39397 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef751fafe6b1%2C39397%2C1733831188842.1733831189523 2024-12-10T11:46:29,524 INFO [RS:1;ef751fafe6b1:42797 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef751fafe6b1%2C42797%2C1733831188809.1733831189524 2024-12-10T11:46:29,526 INFO [RS:0;ef751fafe6b1:41813 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef751fafe6b1%2C41813%2C1733831188771, suffix=, logDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/WALs/ef751fafe6b1,41813,1733831188771, archiveDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/oldWALs, maxLogs=32 2024-12-10T11:46:29,527 INFO [RS:0;ef751fafe6b1:41813 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef751fafe6b1%2C41813%2C1733831188771.1733831189527 2024-12-10T11:46:29,532 INFO [RS:1;ef751fafe6b1:42797 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/WALs/ef751fafe6b1,42797,1733831188809/ef751fafe6b1%2C42797%2C1733831188809.1733831189524 2024-12-10T11:46:29,538 DEBUG [RS:1;ef751fafe6b1:42797 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40041:40041),(127.0.0.1/127.0.0.1:36023:36023),(127.0.0.1/127.0.0.1:39175:39175)] 2024-12-10T11:46:29,539 INFO [RS:2;ef751fafe6b1:39397 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/WALs/ef751fafe6b1,39397,1733831188842/ef751fafe6b1%2C39397%2C1733831188842.1733831189523 2024-12-10T11:46:29,542 DEBUG [RS:2;ef751fafe6b1:39397 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36023:36023),(127.0.0.1/127.0.0.1:39175:39175),(127.0.0.1/127.0.0.1:40041:40041)] 2024-12-10T11:46:29,542 INFO [RS:0;ef751fafe6b1:41813 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/WALs/ef751fafe6b1,41813,1733831188771/ef751fafe6b1%2C41813%2C1733831188771.1733831189527 2024-12-10T11:46:29,544 DEBUG [RS:0;ef751fafe6b1:41813 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:36023:36023),(127.0.0.1/127.0.0.1:39175:39175),(127.0.0.1/127.0.0.1:40041:40041)] 2024-12-10T11:46:29,675 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-10T11:46:29,685 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-10T11:46:29,689 DEBUG [ef751fafe6b1:40575 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-10T11:46:29,689 DEBUG [ef751fafe6b1:40575 {}] balancer.BalancerClusterState(204): Hosts are {ef751fafe6b1=0} racks are {/default-rack=0} 2024-12-10T11:46:29,691 DEBUG [ef751fafe6b1:40575 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T11:46:29,692 DEBUG [ef751fafe6b1:40575 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T11:46:29,692 DEBUG [ef751fafe6b1:40575 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T11:46:29,692 DEBUG [ef751fafe6b1:40575 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T11:46:29,692 DEBUG [ef751fafe6b1:40575 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T11:46:29,692 DEBUG [ef751fafe6b1:40575 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T11:46:29,692 INFO [ef751fafe6b1:40575 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T11:46:29,692 INFO [ef751fafe6b1:40575 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T11:46:29,692 INFO [ef751fafe6b1:40575 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T11:46:29,692 DEBUG [ef751fafe6b1:40575 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T11:46:29,692 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef751fafe6b1,41813,1733831188771 2024-12-10T11:46:29,694 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef751fafe6b1,41813,1733831188771, state=OPENING 2024-12-10T11:46:29,716 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-10T11:46:29,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:29,725 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has 
completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T11:46:29,725 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:29,725 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef751fafe6b1,41813,1733831188771}] 2024-12-10T11:46:29,725 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:29,725 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:29,725 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:29,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-10T11:46:29,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-10T11:46:29,880 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T11:46:29,884 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41997, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T11:46:29,893 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-10T11:46:29,894 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T11:46:29,898 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef751fafe6b1%2C41813%2C1733831188771.meta, suffix=.meta, logDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/WALs/ef751fafe6b1,41813,1733831188771, archiveDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/oldWALs, maxLogs=32 2024-12-10T11:46:29,899 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor ef751fafe6b1%2C41813%2C1733831188771.meta.1733831189899.meta 2024-12-10T11:46:29,907 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/WALs/ef751fafe6b1,41813,1733831188771/ef751fafe6b1%2C41813%2C1733831188771.meta.1733831189899.meta 2024-12-10T11:46:29,908 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:39175:39175),(127.0.0.1/127.0.0.1:40041:40041),(127.0.0.1/127.0.0.1:36023:36023)] 2024-12-10T11:46:29,908 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-10T11:46:29,909 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-10T11:46:29,909 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-10T11:46:29,909 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-10T11:46:29,909 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-10T11:46:29,909 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:46:29,909 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-10T11:46:29,909 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-10T11:46:29,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T11:46:29,912 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T11:46:29,913 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:29,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:29,913 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T11:46:29,914 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T11:46:29,914 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:29,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:29,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T11:46:29,916 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T11:46:29,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:29,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:29,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T11:46:29,917 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T11:46:29,917 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:29,918 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T11:46:29,918 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T11:46:29,919 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740 2024-12-10T11:46:29,920 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740 2024-12-10T11:46:29,922 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T11:46:29,922 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T11:46:29,922 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-10T11:46:29,924 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T11:46:29,925 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62700894, jitterRate=-0.06568387150764465}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T11:46:29,925 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-10T11:46:29,926 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733831189909Writing region info on filesystem at 1733831189909Initializing all the Stores at 1733831189911 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831189911Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831189911Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733831189911Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733831189911Cleaning up temporary data from old regions at 1733831189922 (+11 ms)Running coprocessor post-open hooks at 1733831189925 (+3 ms)Region opened successfully at 1733831189926 (+1 ms) 2024-12-10T11:46:29,927 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733831189879 2024-12-10T11:46:29,930 DEBUG [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-10T11:46:29,930 INFO [RS_OPEN_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-10T11:46:29,931 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=ef751fafe6b1,41813,1733831188771 2024-12-10T11:46:29,933 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef751fafe6b1,41813,1733831188771, state=OPEN 2024-12-10T11:46:29,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:46:29,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:46:29,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:46:29,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T11:46:29,954 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef751fafe6b1,41813,1733831188771 2024-12-10T11:46:29,954 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:29,954 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:29,954 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:29,954 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T11:46:29,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-10T11:46:29,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef751fafe6b1,41813,1733831188771 in 229 msec 2024-12-10T11:46:29,966 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-10T11:46:29,966 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 676 msec 2024-12-10T11:46:29,967 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T11:46:29,967 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-10T11:46:29,969 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T11:46:29,969 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=ef751fafe6b1,41813,1733831188771, seqNum=-1] 2024-12-10T11:46:29,969 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T11:46:29,971 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34067, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T11:46:29,979 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 748 msec 2024-12-10T11:46:29,979 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733831189979, completionTime=-1 2024-12-10T11:46:29,979 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-10T11:46:29,979 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-10T11:46:29,982 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-10T11:46:29,982 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733831249982 2024-12-10T11:46:29,982 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733831309982 2024-12-10T11:46:29,982 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-10T11:46:29,982 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-10T11:46:29,983 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,40575,1733831188608-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,983 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,40575,1733831188608-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,983 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,40575,1733831188608-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,983 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef751fafe6b1:40575, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,983 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-10T11:46:29,983 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
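The NodeDataChanged events above fire because the master has just written the meta location into the /hbase/meta-region-server znode that each ZKWatcher keeps a watch on. A minimal sketch of inspecting that znode with the plain ZooKeeper client, reusing the quorum string and path from this run; the payload is protobuf-encoded, so the sketch only reports its size rather than decoding it.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class MetaZNodeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum and znode path are taken from the log lines above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59194", 30_000, (WatchedEvent e) -> { });
    try {
      Stat stat = new Stat();
      byte[] data = zk.getData("/hbase/meta-region-server", false, stat);
      // The bytes are a protobuf-serialized location record, not plain text.
      System.out.println("meta-region-server znode: " + data.length + " bytes, version=" + stat.getVersion());
    } finally {
      zk.close();
    }
  }
}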
2024-12-10T11:46:29,986 DEBUG [master/ef751fafe6b1:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-10T11:46:29,989 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.081sec 2024-12-10T11:46:29,990 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-10T11:46:29,990 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-10T11:46:29,990 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-10T11:46:29,990 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-10T11:46:29,990 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-10T11:46:29,990 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,40575,1733831188608-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T11:46:29,990 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,40575,1733831188608-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-10T11:46:29,993 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-10T11:46:29,993 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-10T11:46:29,993 INFO [master/ef751fafe6b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef751fafe6b1,40575,1733831188608-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
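Each "Chore ScheduledChore name=..., period=..., unit=... is enabled" line above is the master's ChoreService registering a periodic task. ChoreService and ScheduledChore are server-internal classes, so the following is purely an illustrative sketch of how such a chore is scheduled, assuming the constructors behave as shown.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore has something to consult between runs.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("sketch");
    // Period in milliseconds, mirroring the "period=..., unit=MILLISECONDS" lines above.
    service.scheduleChore(new ScheduledChore("SketchChore", stopper, 60_000) {
      @Override protected void chore() {
        System.out.println("periodic work goes here");
      }
    });
    Thread.sleep(1_000);
    service.shutdown();
  }
}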
2024-12-10T11:46:30,069 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@376b00bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T11:46:30,069 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ef751fafe6b1,40575,-1 for getting cluster id 2024-12-10T11:46:30,069 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-10T11:46:30,070 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8b1c8a55-ebc6-4962-bb92-423921e74732' 2024-12-10T11:46:30,071 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-10T11:46:30,071 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8b1c8a55-ebc6-4962-bb92-423921e74732" 2024-12-10T11:46:30,071 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67dbc74c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T11:46:30,071 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ef751fafe6b1,40575,-1] 2024-12-10T11:46:30,071 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-10T11:46:30,072 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:30,073 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46890, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-10T11:46:30,074 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a6e0fe9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T11:46:30,074 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T11:46:30,075 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef751fafe6b1,41813,1733831188771, seqNum=-1] 2024-12-10T11:46:30,076 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T11:46:30,077 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45014, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T11:46:30,079 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=ef751fafe6b1,40575,1733831188608 2024-12-10T11:46:30,080 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-10T11:46:30,081 DEBUG 
[RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is ef751fafe6b1,40575,1733831188608 2024-12-10T11:46:30,081 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1d4e0dc1 2024-12-10T11:46:30,082 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T11:46:30,084 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46902, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T11:46:30,084 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40575 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T11:46:30,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40575 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-10T11:46:30,089 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T11:46:30,089 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:30,089 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40575 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-10T11:46:30,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40575 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:46:30,092 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T11:46:30,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741837_1013 (size=392) 2024-12-10T11:46:30,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741837_1013 (size=392) 2024-12-10T11:46:30,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741837_1013 (size=392) 2024-12-10T11:46:30,104 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c17522ba59b3d46adce0b197b761698b, NAME => 'TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6 2024-12-10T11:46:30,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741838_1014 (size=51) 2024-12-10T11:46:30,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741838_1014 (size=51) 2024-12-10T11:46:30,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741838_1014 (size=51) 2024-12-10T11:46:30,114 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:46:30,114 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing c17522ba59b3d46adce0b197b761698b, disabling compactions & flushes 2024-12-10T11:46:30,114 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 2024-12-10T11:46:30,114 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 2024-12-10T11:46:30,114 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. after waiting 0 ms 2024-12-10T11:46:30,114 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 2024-12-10T11:46:30,114 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 2024-12-10T11:46:30,114 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for c17522ba59b3d46adce0b197b761698b: Waiting for close lock at 1733831190114Disabling compacts and flushes for region at 1733831190114Disabling writes for close at 1733831190114Writing region close event to WAL at 1733831190114Closed at 1733831190114 2024-12-10T11:46:30,116 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T11:46:30,116 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733831190116"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733831190116"}]},"ts":"1733831190116"} 2024-12-10T11:46:30,119 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
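The master log above records the descriptor it was asked to create: table TestHBaseWalOnEC with REGION_REPLICATION '1', a single 'cf' family with VERSIONS '1', and the 'hbase.store.file-tracker.impl' => 'DEFAULT' metadata entry. A minimal client-side sketch of issuing an equivalent request through the Admin API, assuming an existing Connection named conn.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  // Mirrors the descriptor printed by HMaster above.
  static void createTestTable(Connection conn) throws java.io.IOException {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
        .setRegionReplication(1)                                   // REGION_REPLICATION => '1'
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")      // METADATA entry in the log
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                                     // VERSIONS => '1'
            .build())
        .build();
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(td);                                       // drives the CreateTableProcedure (pid=4 above)
    }
  }
}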
2024-12-10T11:46:30,121 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T11:46:30,122 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733831190122"}]},"ts":"1733831190122"} 2024-12-10T11:46:30,125 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-10T11:46:30,125 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {ef751fafe6b1=0} racks are {/default-rack=0} 2024-12-10T11:46:30,126 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T11:46:30,126 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T11:46:30,126 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T11:46:30,126 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T11:46:30,126 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T11:46:30,126 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T11:46:30,126 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T11:46:30,126 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T11:46:30,126 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T11:46:30,126 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T11:46:30,127 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c17522ba59b3d46adce0b197b761698b, ASSIGN}] 2024-12-10T11:46:30,129 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c17522ba59b3d46adce0b197b761698b, ASSIGN 2024-12-10T11:46:30,131 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c17522ba59b3d46adce0b197b761698b, ASSIGN; state=OFFLINE, location=ef751fafe6b1,41813,1733831188771; forceNewPlan=false, retain=false 2024-12-10T11:46:30,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40575 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:46:30,281 INFO [ef751fafe6b1:40575 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
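The ASSIGN procedure above runs the balancer over the three region servers and hands the new region to ef751fafe6b1,41813. A minimal sketch of checking the resulting placement from the client side, again assuming an existing Connection named conn.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class WhereIsMyRegionSketch {
  static void printLocations(Connection conn) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // In this run the single region ends up on ef751fafe6b1,41813,1733831188771.
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}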
2024-12-10T11:46:30,282 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c17522ba59b3d46adce0b197b761698b, regionState=OPENING, regionLocation=ef751fafe6b1,41813,1733831188771 2024-12-10T11:46:30,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c17522ba59b3d46adce0b197b761698b, ASSIGN because future has completed 2024-12-10T11:46:30,286 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c17522ba59b3d46adce0b197b761698b, server=ef751fafe6b1,41813,1733831188771}] 2024-12-10T11:46:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40575 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:46:30,452 INFO [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 2024-12-10T11:46:30,452 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c17522ba59b3d46adce0b197b761698b, NAME => 'TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b.', STARTKEY => '', ENDKEY => ''} 2024-12-10T11:46:30,453 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC c17522ba59b3d46adce0b197b761698b 2024-12-10T11:46:30,453 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T11:46:30,453 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c17522ba59b3d46adce0b197b761698b 2024-12-10T11:46:30,453 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c17522ba59b3d46adce0b197b761698b 2024-12-10T11:46:30,455 INFO [StoreOpener-c17522ba59b3d46adce0b197b761698b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c17522ba59b3d46adce0b197b761698b 2024-12-10T11:46:30,457 INFO [StoreOpener-c17522ba59b3d46adce0b197b761698b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c17522ba59b3d46adce0b197b761698b columnFamilyName cf 2024-12-10T11:46:30,457 DEBUG [StoreOpener-c17522ba59b3d46adce0b197b761698b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T11:46:30,458 INFO [StoreOpener-c17522ba59b3d46adce0b197b761698b-1 {}] regionserver.HStore(327): Store=c17522ba59b3d46adce0b197b761698b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T11:46:30,458 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c17522ba59b3d46adce0b197b761698b 2024-12-10T11:46:30,459 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/default/TestHBaseWalOnEC/c17522ba59b3d46adce0b197b761698b 2024-12-10T11:46:30,459 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/default/TestHBaseWalOnEC/c17522ba59b3d46adce0b197b761698b 2024-12-10T11:46:30,459 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c17522ba59b3d46adce0b197b761698b 2024-12-10T11:46:30,459 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c17522ba59b3d46adce0b197b761698b 2024-12-10T11:46:30,461 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c17522ba59b3d46adce0b197b761698b 2024-12-10T11:46:30,464 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/default/TestHBaseWalOnEC/c17522ba59b3d46adce0b197b761698b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T11:46:30,464 INFO [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c17522ba59b3d46adce0b197b761698b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61748147, jitterRate=-0.07988090813159943}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T11:46:30,465 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c17522ba59b3d46adce0b197b761698b 2024-12-10T11:46:30,465 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c17522ba59b3d46adce0b197b761698b: Running coprocessor pre-open hook at 1733831190453Writing region info on filesystem at 1733831190453Initializing all the Stores at 1733831190455 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733831190455Cleaning up temporary data from old regions at 1733831190459 (+4 ms)Running coprocessor post-open hooks at 1733831190465 (+6 ms)Region opened successfully at 1733831190465 2024-12-10T11:46:30,467 INFO [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b., pid=6, masterSystemTime=1733831190441 2024-12-10T11:46:30,470 DEBUG [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 2024-12-10T11:46:30,470 INFO [RS_OPEN_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 2024-12-10T11:46:30,472 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c17522ba59b3d46adce0b197b761698b, regionState=OPEN, openSeqNum=2, regionLocation=ef751fafe6b1,41813,1733831188771 2024-12-10T11:46:30,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c17522ba59b3d46adce0b197b761698b, server=ef751fafe6b1,41813,1733831188771 because future has completed 2024-12-10T11:46:30,481 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-10T11:46:30,481 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c17522ba59b3d46adce0b197b761698b, server=ef751fafe6b1,41813,1733831188771 in 191 msec 2024-12-10T11:46:30,485 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-10T11:46:30,485 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c17522ba59b3d46adce0b197b761698b, ASSIGN in 354 msec 2024-12-10T11:46:30,487 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T11:46:30,487 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733831190487"}]},"ts":"1733831190487"} 2024-12-10T11:46:30,490 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-10T11:46:30,491 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T11:46:30,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 406 msec 2024-12-10T11:46:30,715 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40575 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T11:46:30,716 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-10T11:46:30,716 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-10T11:46:30,717 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T11:46:30,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-10T11:46:30,722 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T11:46:30,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-10T11:46:30,729 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b., hostname=ef751fafe6b1,41813,1733831188771, seqNum=2] 2024-12-10T11:46:30,732 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40575 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-10T11:46:30,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40575 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-10T11:46:30,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40575 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:46:30,735 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-10T11:46:30,736 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T11:46:30,736 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T11:46:30,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40575 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:46:30,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41813 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-10T11:46:30,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 
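Just before the flush, the client locator resolves row='row' of TestHBaseWalOnEC and the master stores a FlushTableProcedure (pid=7). A minimal sketch of the client calls that produce this sequence, assuming an existing Connection named conn; the cell value is an assumption, since the log below only shows the key row/cf:cq.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  static void putAndFlush(Connection conn) throws IOException {
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    // Write the single cell that later shows up in the flush ("key is row/cf:cq" below);
    // the value bytes here are illustrative only.
    try (Table table = conn.getTable(tn)) {
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
    }
    // Ask the master to flush the table, which drives the FlushTableProcedure (pid=7 above).
    try (Admin admin = conn.getAdmin()) {
      admin.flush(tn);
    }
  }
}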
2024-12-10T11:46:30,892 INFO [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing c17522ba59b3d46adce0b197b761698b 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-10T11:46:30,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/default/TestHBaseWalOnEC/c17522ba59b3d46adce0b197b761698b/.tmp/cf/21f106b31ba24ee0aa1ad7f46569ddf7 is 36, key is row/cf:cq/1733831190730/Put/seqid=0 2024-12-10T11:46:30,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741839_1015 (size=4787) 2024-12-10T11:46:30,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741839_1015 (size=4787) 2024-12-10T11:46:30,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741839_1015 (size=4787) 2024-12-10T11:46:30,917 INFO [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/default/TestHBaseWalOnEC/c17522ba59b3d46adce0b197b761698b/.tmp/cf/21f106b31ba24ee0aa1ad7f46569ddf7 2024-12-10T11:46:30,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/default/TestHBaseWalOnEC/c17522ba59b3d46adce0b197b761698b/.tmp/cf/21f106b31ba24ee0aa1ad7f46569ddf7 as hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/default/TestHBaseWalOnEC/c17522ba59b3d46adce0b197b761698b/cf/21f106b31ba24ee0aa1ad7f46569ddf7 2024-12-10T11:46:30,932 INFO [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/default/TestHBaseWalOnEC/c17522ba59b3d46adce0b197b761698b/cf/21f106b31ba24ee0aa1ad7f46569ddf7, entries=1, sequenceid=5, filesize=4.7 K 2024-12-10T11:46:30,933 INFO [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for c17522ba59b3d46adce0b197b761698b in 41ms, sequenceid=5, compaction requested=false 2024-12-10T11:46:30,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for c17522ba59b3d46adce0b197b761698b: 2024-12-10T11:46:30,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 
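The flush above writes the HFile into the store's .tmp directory and then commits it into cf/ before reporting "Added ... entries=1, sequenceid=5, filesize=4.7 K". A minimal sketch of listing that column-family directory on HDFS afterwards; the NameNode address and test-data path are copied from this run and would differ elsewhere.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListFlushedFilesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37145"), conf);
    // Region and family directory as printed in the flush/commit lines above.
    Path cfDir = new Path("/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6"
        + "/data/default/TestHBaseWalOnEC/c17522ba59b3d46adce0b197b761698b/cf");
    for (FileStatus st : fs.listStatus(cfDir)) {
      // Expect the single committed HFile (about 4.7 K in this run).
      System.out.println(st.getPath().getName() + " " + st.getLen() + " bytes");
    }
    fs.close();
  }
}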
2024-12-10T11:46:30,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef751fafe6b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-10T11:46:30,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40575 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-10T11:46:30,939 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-10T11:46:30,939 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec 2024-12-10T11:46:30,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 208 msec 2024-12-10T11:46:31,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40575 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T11:46:31,057 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-10T11:46:31,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-10T11:46:31,064 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-10T11:46:31,064 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:46:31,064 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:31,065 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:31,065 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-10T11:46:31,065 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-10T11:46:31,065 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1768093097, stopped=false 2024-12-10T11:46:31,065 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef751fafe6b1,40575,1733831188608 2024-12-10T11:46:31,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:31,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:31,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:31,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T11:46:31,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:31,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, 
quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:31,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:31,120 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T11:46:31,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:31,120 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-10T11:46:31,121 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:46:31,121 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:31,121 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:31,121 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:31,121 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:31,121 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef751fafe6b1,41813,1733831188771' ***** 2024-12-10T11:46:31,121 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T11:46:31,121 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef751fafe6b1,42797,1733831188809' ***** 2024-12-10T11:46:31,122 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T11:46:31,122 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T11:46:31,122 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef751fafe6b1,39397,1733831188842' ***** 2024-12-10T11:46:31,122 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T11:46:31,122 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T11:46:31,122 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T11:46:31,122 INFO [RS:1;ef751fafe6b1:42797 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T11:46:31,122 INFO [RS:0;ef751fafe6b1:41813 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
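The shutdown traffic that follows is driven by the test's tear-down hook, as the call stacks above show (TestHBaseWalOnEC.tearDown calling HBaseTestingUtil.shutdownMiniCluster). A minimal sketch of such a hook, assuming the test keeps its utility instance in a field named UTIL; the real test body is not reproduced here.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class TearDownSketch {
  // Assumed to be the same utility instance that started the minicluster earlier in the test.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Stops the master and the three region servers, producing the STOPPING/STOPPED lines above.
    UTIL.shutdownMiniCluster();
  }
}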
2024-12-10T11:46:31,122 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T11:46:31,122 INFO [RS:1;ef751fafe6b1:42797 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T11:46:31,123 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.HRegionServer(959): stopping server ef751fafe6b1,42797,1733831188809 2024-12-10T11:46:31,122 INFO [RS:2;ef751fafe6b1:39397 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T11:46:31,123 INFO [RS:0;ef751fafe6b1:41813 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T11:46:31,122 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T11:46:31,123 INFO [RS:2;ef751fafe6b1:39397 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T11:46:31,123 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:46:31,123 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.HRegionServer(959): stopping server ef751fafe6b1,39397,1733831188842 2024-12-10T11:46:31,123 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(3091): Received CLOSE for c17522ba59b3d46adce0b197b761698b 2024-12-10T11:46:31,123 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T11:46:31,123 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:46:31,123 INFO [RS:1;ef751fafe6b1:42797 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;ef751fafe6b1:42797. 2024-12-10T11:46:31,123 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T11:46:31,123 INFO [RS:2;ef751fafe6b1:39397 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;ef751fafe6b1:39397. 
2024-12-10T11:46:31,123 DEBUG [RS:1;ef751fafe6b1:42797 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:46:31,123 DEBUG [RS:2;ef751fafe6b1:39397 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:46:31,123 DEBUG [RS:1;ef751fafe6b1:42797 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:31,123 DEBUG [RS:2;ef751fafe6b1:39397 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:31,123 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(959): stopping server ef751fafe6b1,41813,1733831188771 2024-12-10T11:46:31,123 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:46:31,123 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.HRegionServer(976): stopping server ef751fafe6b1,42797,1733831188809; all regions closed. 
2024-12-10T11:46:31,123 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.HRegionServer(976): stopping server ef751fafe6b1,39397,1733831188842; all regions closed. 2024-12-10T11:46:31,124 INFO [RS:0;ef751fafe6b1:41813 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef751fafe6b1:41813. 2024-12-10T11:46:31,124 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c17522ba59b3d46adce0b197b761698b, disabling compactions & flushes 2024-12-10T11:46:31,124 DEBUG [RS:0;ef751fafe6b1:41813 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T11:46:31,124 DEBUG [RS:0;ef751fafe6b1:41813 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:31,124 INFO [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 2024-12-10T11:46:31,124 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 2024-12-10T11:46:31,124 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T11:46:31,124 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T11:46:31,124 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. after waiting 0 ms 2024-12-10T11:46:31,124 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T11:46:31,124 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 
2024-12-10T11:46:31,124 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,124 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-10T11:46:31,124 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,124 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,125 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,125 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,125 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,125 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,125 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-10T11:46:31,125 DEBUG [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, c17522ba59b3d46adce0b197b761698b=TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b.} 2024-12-10T11:46:31,125 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,125 DEBUG [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c17522ba59b3d46adce0b197b761698b 2024-12-10T11:46:31,125 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,125 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,125 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T11:46:31,125 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T11:46:31,125 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T11:46:31,125 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T11:46:31,125 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T11:46:31,126 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-10T11:46:31,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741834_1010 (size=93) 2024-12-10T11:46:31,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741834_1010 (size=93) 2024-12-10T11:46:31,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741833_1009 (size=93) 2024-12-10T11:46:31,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741833_1009 (size=93) 2024-12-10T11:46:31,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741834_1010 (size=93) 2024-12-10T11:46:31,130 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741833_1009 (size=93) 2024-12-10T11:46:31,132 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/default/TestHBaseWalOnEC/c17522ba59b3d46adce0b197b761698b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-10T11:46:31,133 DEBUG [RS:1;ef751fafe6b1:42797 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/oldWALs 2024-12-10T11:46:31,133 INFO [RS:1;ef751fafe6b1:42797 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef751fafe6b1%2C42797%2C1733831188809:(num 1733831189524) 2024-12-10T11:46:31,133 DEBUG [RS:1;ef751fafe6b1:42797 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:31,133 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:46:31,133 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T11:46:31,133 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.ChoreService(370): Chore service for: regionserver/ef751fafe6b1:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T11:46:31,133 INFO [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 2024-12-10T11:46:31,133 DEBUG [RS:2;ef751fafe6b1:39397 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/oldWALs 2024-12-10T11:46:31,133 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T11:46:31,133 INFO [RS:2;ef751fafe6b1:39397 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef751fafe6b1%2C39397%2C1733831188842:(num 1733831189523) 2024-12-10T11:46:31,133 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c17522ba59b3d46adce0b197b761698b: Waiting for close lock at 1733831191124Running coprocessor pre-close hooks at 1733831191124Disabling compacts and flushes for region at 1733831191124Disabling writes for close at 1733831191124Writing region close event to WAL at 1733831191125 (+1 ms)Running coprocessor post-close hooks at 1733831191133 (+8 ms)Closed at 1733831191133 2024-12-10T11:46:31,133 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T11:46:31,133 DEBUG [RS:2;ef751fafe6b1:39397 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:31,133 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T11:46:31,133 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T11:46:31,133 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:46:31,133 DEBUG [RS_CLOSE_REGION-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b. 
2024-12-10T11:46:31,134 INFO [RS:1;ef751fafe6b1:42797 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42797 2024-12-10T11:46:31,134 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T11:46:31,134 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.ChoreService(370): Chore service for: regionserver/ef751fafe6b1:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T11:46:31,134 INFO [regionserver/ef751fafe6b1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T11:46:31,134 INFO [regionserver/ef751fafe6b1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T11:46:31,134 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T11:46:31,134 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T11:46:31,134 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T11:46:31,134 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T11:46:31,134 INFO [RS:2;ef751fafe6b1:39397 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39397 2024-12-10T11:46:31,145 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/.tmp/info/a08d95bb59c54cfeae0f76b9c300b401 is 153, key is TestHBaseWalOnEC,,1733831190084.c17522ba59b3d46adce0b197b761698b./info:regioninfo/1733831190471/Put/seqid=0 2024-12-10T11:46:31,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef751fafe6b1,42797,1733831188809 2024-12-10T11:46:31,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T11:46:31,145 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T11:46:31,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741840_1016 (size=6637) 2024-12-10T11:46:31,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741840_1016 (size=6637) 2024-12-10T11:46:31,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741840_1016 (size=6637) 2024-12-10T11:46:31,152 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/.tmp/info/a08d95bb59c54cfeae0f76b9c300b401 2024-12-10T11:46:31,153 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef751fafe6b1,39397,1733831188842 2024-12-10T11:46:31,153 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T11:46:31,162 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef751fafe6b1,39397,1733831188842] 2024-12-10T11:46:31,176 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/.tmp/ns/2cd6e80843d5481abf7a3c888ed0087b is 43, key is default/ns:d/1733831189971/Put/seqid=0 2024-12-10T11:46:31,177 INFO [regionserver/ef751fafe6b1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:46:31,177 INFO [regionserver/ef751fafe6b1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:46:31,177 WARN [IPC Server handler 4 on default port 37145 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T11:46:31,178 WARN [IPC Server handler 4 on default port 37145 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T11:46:31,178 WARN [IPC Server handler 4 on default port 37145 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T11:46:31,178 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef751fafe6b1,39397,1733831188842 already deleted, retry=false 2024-12-10T11:46:31,178 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef751fafe6b1,39397,1733831188842 expired; onlineServers=2 2024-12-10T11:46:31,178 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef751fafe6b1,42797,1733831188809] 2024-12-10T11:46:31,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741841_1017 (size=5153) 2024-12-10T11:46:31,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741841_1017 (size=5153) 2024-12-10T11:46:31,183 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/.tmp/ns/2cd6e80843d5481abf7a3c888ed0087b 2024-12-10T11:46:31,183 INFO [regionserver/ef751fafe6b1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:46:31,186 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef751fafe6b1,42797,1733831188809 already deleted, retry=false 2024-12-10T11:46:31,186 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef751fafe6b1,42797,1733831188809 expired; onlineServers=1 2024-12-10T11:46:31,203 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/.tmp/table/891a1c2e393842e0a7d47d5c1355db90 is 52, key is TestHBaseWalOnEC/table:state/1733831190487/Put/seqid=0 2024-12-10T11:46:31,204 WARN [IPC Server handler 2 on default port 37145 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T11:46:31,204 WARN [IPC Server handler 2 on default port 37145 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T11:46:31,204 WARN [IPC Server handler 2 on default port 37145 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T11:46:31,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741842_1018 (size=5249) 2024-12-10T11:46:31,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741842_1018 (size=5249) 2024-12-10T11:46:31,209 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/.tmp/table/891a1c2e393842e0a7d47d5c1355db90 2024-12-10T11:46:31,217 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/.tmp/info/a08d95bb59c54cfeae0f76b9c300b401 as 
hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/info/a08d95bb59c54cfeae0f76b9c300b401 2024-12-10T11:46:31,224 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/info/a08d95bb59c54cfeae0f76b9c300b401, entries=10, sequenceid=11, filesize=6.5 K 2024-12-10T11:46:31,226 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/.tmp/ns/2cd6e80843d5481abf7a3c888ed0087b as hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/ns/2cd6e80843d5481abf7a3c888ed0087b 2024-12-10T11:46:31,233 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/ns/2cd6e80843d5481abf7a3c888ed0087b, entries=2, sequenceid=11, filesize=5.0 K 2024-12-10T11:46:31,234 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/.tmp/table/891a1c2e393842e0a7d47d5c1355db90 as hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/table/891a1c2e393842e0a7d47d5c1355db90 2024-12-10T11:46:31,241 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/table/891a1c2e393842e0a7d47d5c1355db90, entries=2, sequenceid=11, filesize=5.1 K 2024-12-10T11:46:31,243 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=11, compaction requested=false 2024-12-10T11:46:31,248 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-10T11:46:31,249 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T11:46:31,249 INFO [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T11:46:31,249 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733831191125Running coprocessor pre-close hooks at 1733831191125Disabling compacts and flushes for region at 1733831191125Disabling writes for close at 1733831191125Obtaining lock to block concurrent updates at 1733831191126 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 
1733831191126Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733831191126Flushing stores of hbase:meta,,1.1588230740 at 1733831191128 (+2 ms)Flushing 1588230740/info: creating writer at 1733831191128Flushing 1588230740/info: appending metadata at 1733831191144 (+16 ms)Flushing 1588230740/info: closing flushed file at 1733831191144Flushing 1588230740/ns: creating writer at 1733831191159 (+15 ms)Flushing 1588230740/ns: appending metadata at 1733831191176 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733831191176Flushing 1588230740/table: creating writer at 1733831191189 (+13 ms)Flushing 1588230740/table: appending metadata at 1733831191203 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733831191203Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24b31f80: reopening flushed file at 1733831191215 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1697a1b3: reopening flushed file at 1733831191225 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57296173: reopening flushed file at 1733831191233 (+8 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=11, compaction requested=false at 1733831191243 (+10 ms)Writing region close event to WAL at 1733831191244 (+1 ms)Running coprocessor post-close hooks at 1733831191249 (+5 ms)Closed at 1733831191249 2024-12-10T11:46:31,249 DEBUG [RS_CLOSE_META-regionserver/ef751fafe6b1:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-10T11:46:31,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:31,262 INFO [RS:1;ef751fafe6b1:42797 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T11:46:31,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42797-0x1000f9a1f020002, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:31,262 INFO [RS:1;ef751fafe6b1:42797 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef751fafe6b1,42797,1733831188809; zookeeper connection closed. 2024-12-10T11:46:31,262 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@49b6574 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@49b6574 2024-12-10T11:46:31,270 INFO [RS:2;ef751fafe6b1:39397 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T11:46:31,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:31,270 INFO [RS:2;ef751fafe6b1:39397 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef751fafe6b1,39397,1733831188842; zookeeper connection closed. 
2024-12-10T11:46:31,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39397-0x1000f9a1f020003, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:31,270 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@33d275d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@33d275d 2024-12-10T11:46:31,325 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(976): stopping server ef751fafe6b1,41813,1733831188771; all regions closed. 2024-12-10T11:46:31,326 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,326 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,326 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,326 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,326 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741836_1012 (size=2751) 2024-12-10T11:46:31,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741836_1012 (size=2751) 2024-12-10T11:46:31,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741836_1012 (size=2751) 2024-12-10T11:46:31,332 DEBUG [RS:0;ef751fafe6b1:41813 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/oldWALs 2024-12-10T11:46:31,332 INFO [RS:0;ef751fafe6b1:41813 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef751fafe6b1%2C41813%2C1733831188771.meta:.meta(num 1733831189899) 2024-12-10T11:46:31,333 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,333 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,333 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,333 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,334 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741835_1011 (size=1298) 2024-12-10T11:46:31,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741835_1011 (size=1298) 2024-12-10T11:46:31,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741835_1011 (size=1298) 2024-12-10T11:46:31,339 DEBUG [RS:0;ef751fafe6b1:41813 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/oldWALs 2024-12-10T11:46:31,340 INFO [RS:0;ef751fafe6b1:41813 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef751fafe6b1%2C41813%2C1733831188771:(num 1733831189527) 2024-12-10T11:46:31,340 DEBUG [RS:0;ef751fafe6b1:41813 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T11:46:31,340 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T11:46:31,340 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T11:46:31,340 INFO [RS:0;ef751fafe6b1:41813 {}] 
hbase.ChoreService(370): Chore service for: regionserver/ef751fafe6b1:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T11:46:31,340 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T11:46:31,340 INFO [regionserver/ef751fafe6b1:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T11:46:31,340 INFO [RS:0;ef751fafe6b1:41813 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41813 2024-12-10T11:46:31,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef751fafe6b1,41813,1733831188771 2024-12-10T11:46:31,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T11:46:31,366 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T11:46:31,374 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef751fafe6b1,41813,1733831188771] 2024-12-10T11:46:31,382 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef751fafe6b1,41813,1733831188771 already deleted, retry=false 2024-12-10T11:46:31,382 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef751fafe6b1,41813,1733831188771 expired; onlineServers=0 2024-12-10T11:46:31,382 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef751fafe6b1,40575,1733831188608' ***** 2024-12-10T11:46:31,382 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-10T11:46:31,383 INFO [M:0;ef751fafe6b1:40575 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T11:46:31,383 INFO [M:0;ef751fafe6b1:40575 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T11:46:31,383 DEBUG [M:0;ef751fafe6b1:40575 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-10T11:46:31,383 DEBUG [M:0;ef751fafe6b1:40575 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-10T11:46:31,383 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-10T11:46:31,383 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster-HFileCleaner.small.0-1733831189235 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef751fafe6b1:0:becomeActiveMaster-HFileCleaner.small.0-1733831189235,5,FailOnTimeoutGroup] 2024-12-10T11:46:31,383 DEBUG [master/ef751fafe6b1:0:becomeActiveMaster-HFileCleaner.large.0-1733831189235 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef751fafe6b1:0:becomeActiveMaster-HFileCleaner.large.0-1733831189235,5,FailOnTimeoutGroup] 2024-12-10T11:46:31,383 INFO [M:0;ef751fafe6b1:40575 {}] hbase.ChoreService(370): Chore service for: master/ef751fafe6b1:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-10T11:46:31,383 INFO [M:0;ef751fafe6b1:40575 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T11:46:31,383 DEBUG [M:0;ef751fafe6b1:40575 {}] master.HMaster(1795): Stopping service threads 2024-12-10T11:46:31,383 INFO [M:0;ef751fafe6b1:40575 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-10T11:46:31,384 INFO [M:0;ef751fafe6b1:40575 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T11:46:31,384 INFO [M:0;ef751fafe6b1:40575 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-10T11:46:31,384 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-10T11:46:31,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-10T11:46:31,391 DEBUG [M:0;ef751fafe6b1:40575 {}] zookeeper.ZKUtil(347): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-10T11:46:31,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T11:46:31,391 WARN [M:0;ef751fafe6b1:40575 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-10T11:46:31,391 INFO [M:0;ef751fafe6b1:40575 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/.lastflushedseqids 2024-12-10T11:46:31,394 WARN [IPC Server handler 2 on default port 37145 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T11:46:31,394 WARN [IPC Server handler 2 on default port 37145 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T11:46:31,394 WARN [IPC Server handler 2 on default 
port 37145 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T11:46:31,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741843_1019 (size=127) 2024-12-10T11:46:31,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741843_1019 (size=127) 2024-12-10T11:46:31,400 INFO [M:0;ef751fafe6b1:40575 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-10T11:46:31,400 INFO [M:0;ef751fafe6b1:40575 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-10T11:46:31,400 DEBUG [M:0;ef751fafe6b1:40575 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T11:46:31,400 INFO [M:0;ef751fafe6b1:40575 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:46:31,400 DEBUG [M:0;ef751fafe6b1:40575 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:46:31,400 DEBUG [M:0;ef751fafe6b1:40575 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T11:46:31,400 DEBUG [M:0;ef751fafe6b1:40575 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-10T11:46:31,400 INFO [M:0;ef751fafe6b1:40575 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-10T11:46:31,417 DEBUG [M:0;ef751fafe6b1:40575 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9caca57af46a4e258abfceca81c0be65 is 82, key is hbase:meta,,1/info:regioninfo/1733831189931/Put/seqid=0 2024-12-10T11:46:31,418 WARN [IPC Server handler 0 on default port 37145 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T11:46:31,418 WARN [IPC Server handler 0 on default port 37145 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T11:46:31,418 WARN [IPC Server handler 0 on default port 37145 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T11:46:31,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741844_1020 (size=5672) 2024-12-10T11:46:31,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741844_1020 (size=5672) 2024-12-10T11:46:31,423 INFO [M:0;ef751fafe6b1:40575 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9caca57af46a4e258abfceca81c0be65 2024-12-10T11:46:31,452 DEBUG [M:0;ef751fafe6b1:40575 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb0428d137ac4ba08684cb171d7a1769 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733831190493/Put/seqid=0 2024-12-10T11:46:31,454 WARN [IPC Server handler 1 on default port 37145 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and 
org.apache.hadoop.net.NetworkTopology 2024-12-10T11:46:31,454 WARN [IPC Server handler 1 on default port 37145 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T11:46:31,454 WARN [IPC Server handler 1 on default port 37145 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T11:46:31,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741845_1021 (size=6439) 2024-12-10T11:46:31,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741845_1021 (size=6439) 2024-12-10T11:46:31,459 INFO [M:0;ef751fafe6b1:40575 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb0428d137ac4ba08684cb171d7a1769 2024-12-10T11:46:31,474 INFO [RS:0;ef751fafe6b1:41813 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T11:46:31,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:31,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1000f9a1f020001, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T11:46:31,474 INFO [RS:0;ef751fafe6b1:41813 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef751fafe6b1,41813,1733831188771; zookeeper connection closed. 
2024-12-10T11:46:31,475 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3af49c56 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3af49c56 2024-12-10T11:46:31,475 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-10T11:46:31,480 DEBUG [M:0;ef751fafe6b1:40575 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6d2b4df55c884a1791404614077dd111 is 69, key is ef751fafe6b1,39397,1733831188842/rs:state/1733831189318/Put/seqid=0 2024-12-10T11:46:31,481 WARN [IPC Server handler 3 on default port 37145 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T11:46:31,481 WARN [IPC Server handler 3 on default port 37145 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T11:46:31,481 WARN [IPC Server handler 3 on default port 37145 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T11:46:31,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741846_1022 (size=5294) 2024-12-10T11:46:31,486 INFO [M:0;ef751fafe6b1:40575 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6d2b4df55c884a1791404614077dd111 2024-12-10T11:46:31,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741846_1022 (size=5294) 2024-12-10T11:46:31,495 DEBUG [M:0;ef751fafe6b1:40575 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9caca57af46a4e258abfceca81c0be65 as hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9caca57af46a4e258abfceca81c0be65 2024-12-10T11:46:31,503 INFO [M:0;ef751fafe6b1:40575 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9caca57af46a4e258abfceca81c0be65, entries=8, sequenceid=72, filesize=5.5 K 2024-12-10T11:46:31,504 DEBUG [M:0;ef751fafe6b1:40575 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb0428d137ac4ba08684cb171d7a1769 as hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bb0428d137ac4ba08684cb171d7a1769 2024-12-10T11:46:31,511 INFO [M:0;ef751fafe6b1:40575 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bb0428d137ac4ba08684cb171d7a1769, entries=8, sequenceid=72, filesize=6.3 K 2024-12-10T11:46:31,513 DEBUG [M:0;ef751fafe6b1:40575 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6d2b4df55c884a1791404614077dd111 as hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6d2b4df55c884a1791404614077dd111 2024-12-10T11:46:31,519 INFO [M:0;ef751fafe6b1:40575 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37145/user/jenkins/test-data/0a18477c-72b4-afca-f745-effa86d33fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6d2b4df55c884a1791404614077dd111, entries=3, sequenceid=72, filesize=5.2 K 2024-12-10T11:46:31,521 INFO [M:0;ef751fafe6b1:40575 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=72, compaction requested=false 2024-12-10T11:46:31,522 INFO [M:0;ef751fafe6b1:40575 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T11:46:31,522 DEBUG [M:0;ef751fafe6b1:40575 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733831191400Disabling compacts and flushes for region at 1733831191400Disabling writes for close at 1733831191400Obtaining lock to block concurrent updates at 1733831191400Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733831191400Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733831191401 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733831191401Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733831191402 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733831191416 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733831191416Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733831191431 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733831191452 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733831191452Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733831191465 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733831191479 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733831191479Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40b80914: reopening flushed file at 1733831191494 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a0f3d0: reopening flushed file at 1733831191503 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45b31594: reopening flushed file at 1733831191511 (+8 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=72, compaction requested=false at 1733831191521 (+10 ms)Writing region close event to WAL at 1733831191522 (+1 ms)Closed at 1733831191522 2024-12-10T11:46:31,524 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,524 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,524 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,524 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,524 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T11:46:31,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43989 is added to blk_1073741830_1006 (size=32674) 2024-12-10T11:46:31,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38591 is added to blk_1073741830_1006 (size=32674) 2024-12-10T11:46:31,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34141 is added to blk_1073741830_1006 (size=32674) 2024-12-10T11:46:31,528 INFO [M:0;ef751fafe6b1:40575 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-10T11:46:31,528 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-10T11:46:31,528 INFO [M:0;ef751fafe6b1:40575 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40575
2024-12-10T11:46:31,528 INFO [M:0;ef751fafe6b1:40575 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-10T11:46:31,653 INFO [M:0;ef751fafe6b1:40575 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-10T11:46:31,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T11:46:31,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40575-0x1000f9a1f020000, quorum=127.0.0.1:59194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T11:46:31,657 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@bff0a43{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T11:46:31,657 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@19dff04d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T11:46:31,657 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T11:46:31,657 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38da8210{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T11:46:31,658 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e0095f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/hadoop.log.dir/,STOPPED}
2024-12-10T11:46:31,659 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T11:46:31,659 WARN [BP-21781453-172.17.0.2-1733831186620 heartbeating to localhost/127.0.0.1:37145 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T11:46:31,659 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T11:46:31,659 WARN [BP-21781453-172.17.0.2-1733831186620 heartbeating to localhost/127.0.0.1:37145 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-21781453-172.17.0.2-1733831186620 (Datanode Uuid eb0cb4bc-7b70-4d7c-b175-c81d69ffe028) service to localhost/127.0.0.1:37145
2024-12-10T11:46:31,660 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/data/data5/current/BP-21781453-172.17.0.2-1733831186620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T11:46:31,660 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/data/data6/current/BP-21781453-172.17.0.2-1733831186620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T11:46:31,661 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T11:46:31,663 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6e5e4927{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T11:46:31,663 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1768a8c1{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T11:46:31,663 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T11:46:31,664 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bb1336{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T11:46:31,664 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b4297c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/hadoop.log.dir/,STOPPED}
2024-12-10T11:46:31,665 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T11:46:31,665 WARN [BP-21781453-172.17.0.2-1733831186620 heartbeating to localhost/127.0.0.1:37145 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T11:46:31,665 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T11:46:31,666 WARN [BP-21781453-172.17.0.2-1733831186620 heartbeating to localhost/127.0.0.1:37145 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-21781453-172.17.0.2-1733831186620 (Datanode Uuid cd33c8e5-3924-48e2-87e7-991dc27a2de4) service to localhost/127.0.0.1:37145
2024-12-10T11:46:31,666 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/data/data3/current/BP-21781453-172.17.0.2-1733831186620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T11:46:31,667 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/data/data4/current/BP-21781453-172.17.0.2-1733831186620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T11:46:31,667 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T11:46:31,669 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38e5384{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T11:46:31,670 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d6118e0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T11:46:31,670 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T11:46:31,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61a92fea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T11:46:31,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2807f8c2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/hadoop.log.dir/,STOPPED}
2024-12-10T11:46:31,671 WARN [BP-21781453-172.17.0.2-1733831186620 heartbeating to localhost/127.0.0.1:37145 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T11:46:31,671 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T11:46:31,671 WARN [BP-21781453-172.17.0.2-1733831186620 heartbeating to localhost/127.0.0.1:37145 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-21781453-172.17.0.2-1733831186620 (Datanode Uuid ffcc43a6-daf2-4943-8178-3452b18ebd17) service to localhost/127.0.0.1:37145
2024-12-10T11:46:31,671 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T11:46:31,672 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/data/data1/current/BP-21781453-172.17.0.2-1733831186620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T11:46:31,672 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/cluster_65dad53e-0639-7dd9-5754-38c942081c2f/data/data2/current/BP-21781453-172.17.0.2-1733831186620 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T11:46:31,672 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T11:46:31,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7aaeb6cf{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-10T11:46:31,678 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@686c9dd5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T11:46:31,678 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T11:46:31,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56aa9d3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T11:46:31,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a49b909{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1fe8c5e9-199c-4b47-c3e3-1284748a1506/hadoop.log.dir/,STOPPED}
2024-12-10T11:46:31,685 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-10T11:46:31,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-10T11:46:31,717 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=146 (was 86) - Thread LEAK? -, OpenFileDescriptor=518 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=208 (was 174) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6874 (was 7090)